1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22
23 /*
24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
28 * Copyright (c) 2018 Datto Inc.
29 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
30 * Copyright (c) 2017, Intel Corporation.
31 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
32 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
33 * Copyright (c) 2021, 2023, Klara Inc.
34 * Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
35 */
36
37 #include <errno.h>
38 #include <libintl.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <strings.h>
42 #include <unistd.h>
43 #include <libgen.h>
44 #include <zone.h>
45 #include <sys/stat.h>
46 #include <sys/efi_partition.h>
47 #include <sys/systeminfo.h>
48 #include <sys/zfs_ioctl.h>
49 #include <sys/zfs_sysfs.h>
50 #include <sys/vdev_disk.h>
51 #include <sys/types.h>
52 #include <dlfcn.h>
53 #include <libzutil.h>
54 #include <fcntl.h>
55
56 #include "zfs_namecheck.h"
57 #include "zfs_prop.h"
58 #include "libzfs_impl.h"
59 #include "zfs_comutil.h"
60 #include "zfeature_common.h"
61
/* Forward declaration; defined later in this file. */
static boolean_t zpool_vdev_is_interior(const char *name);

/*
 * Flags that control how zpool_valid_proplist() validates a property list:
 * some properties are only legal at creation and/or import time, and vdev
 * properties are validated against the vdev property table instead.
 */
typedef struct prop_flags {
	unsigned int create:1;	/* Validate property on creation */
	unsigned int import:1;	/* Validate property on import */
	unsigned int vdevprop:1; /* Validate property as a VDEV property */
} prop_flags_t;
69
70 /*
71 * ====================================================================
72 * zpool property functions
73 * ====================================================================
74 */
75
76 static int
zpool_get_all_props(zpool_handle_t * zhp)77 zpool_get_all_props(zpool_handle_t *zhp)
78 {
79 zfs_cmd_t zc = {"\0"};
80 libzfs_handle_t *hdl = zhp->zpool_hdl;
81
82 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
83
84 if (zhp->zpool_n_propnames > 0) {
85 nvlist_t *innvl = fnvlist_alloc();
86 fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
87 zhp->zpool_propnames, zhp->zpool_n_propnames);
88 zcmd_write_src_nvlist(hdl, &zc, innvl);
89 fnvlist_free(innvl);
90 }
91
92 zcmd_alloc_dst_nvlist(hdl, &zc, 0);
93
94 while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
95 if (errno == ENOMEM)
96 zcmd_expand_dst_nvlist(hdl, &zc);
97 else {
98 zcmd_free_nvlists(&zc);
99 return (-1);
100 }
101 }
102
103 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
104 zcmd_free_nvlists(&zc);
105 return (-1);
106 }
107
108 zcmd_free_nvlists(&zc);
109
110 return (0);
111 }
112
113 int
zpool_props_refresh(zpool_handle_t * zhp)114 zpool_props_refresh(zpool_handle_t *zhp)
115 {
116 nvlist_t *old_props;
117
118 old_props = zhp->zpool_props;
119
120 if (zpool_get_all_props(zhp) != 0)
121 return (-1);
122
123 nvlist_free(old_props);
124 return (0);
125 }
126
127 static const char *
zpool_get_prop_string(zpool_handle_t * zhp,zpool_prop_t prop,zprop_source_t * src)128 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
129 zprop_source_t *src)
130 {
131 nvlist_t *nv, *nvl;
132 const char *value;
133 zprop_source_t source;
134
135 nvl = zhp->zpool_props;
136 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
137 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
138 value = fnvlist_lookup_string(nv, ZPROP_VALUE);
139 } else {
140 source = ZPROP_SRC_DEFAULT;
141 if ((value = zpool_prop_default_string(prop)) == NULL)
142 value = "-";
143 }
144
145 if (src)
146 *src = source;
147
148 return (value);
149 }
150
151 uint64_t
zpool_get_prop_int(zpool_handle_t * zhp,zpool_prop_t prop,zprop_source_t * src)152 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
153 {
154 nvlist_t *nv, *nvl;
155 uint64_t value;
156 zprop_source_t source;
157
158 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
159 /*
160 * zpool_get_all_props() has most likely failed because
161 * the pool is faulted, but if all we need is the top level
162 * vdev's guid then get it from the zhp config nvlist.
163 */
164 if ((prop == ZPOOL_PROP_GUID) &&
165 (nvlist_lookup_nvlist(zhp->zpool_config,
166 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
167 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
168 == 0)) {
169 return (value);
170 }
171 return (zpool_prop_default_numeric(prop));
172 }
173
174 nvl = zhp->zpool_props;
175 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
176 source = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
177 value = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
178 } else {
179 source = ZPROP_SRC_DEFAULT;
180 value = zpool_prop_default_numeric(prop);
181 }
182
183 if (src)
184 *src = source;
185
186 return (value);
187 }
188
189 /*
190 * Map VDEV STATE to printed strings.
191 */
192 const char *
zpool_state_to_name(vdev_state_t state,vdev_aux_t aux)193 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
194 {
195 switch (state) {
196 case VDEV_STATE_CLOSED:
197 case VDEV_STATE_OFFLINE:
198 return (gettext("OFFLINE"));
199 case VDEV_STATE_REMOVED:
200 return (gettext("REMOVED"));
201 case VDEV_STATE_CANT_OPEN:
202 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
203 return (gettext("FAULTED"));
204 else if (aux == VDEV_AUX_SPLIT_POOL)
205 return (gettext("SPLIT"));
206 else
207 return (gettext("UNAVAIL"));
208 case VDEV_STATE_FAULTED:
209 return (gettext("FAULTED"));
210 case VDEV_STATE_DEGRADED:
211 return (gettext("DEGRADED"));
212 case VDEV_STATE_HEALTHY:
213 return (gettext("ONLINE"));
214
215 default:
216 break;
217 }
218
219 return (gettext("UNKNOWN"));
220 }
221
222 /*
223 * Map POOL STATE to printed strings.
224 */
225 const char *
zpool_pool_state_to_name(pool_state_t state)226 zpool_pool_state_to_name(pool_state_t state)
227 {
228 switch (state) {
229 default:
230 break;
231 case POOL_STATE_ACTIVE:
232 return (gettext("ACTIVE"));
233 case POOL_STATE_EXPORTED:
234 return (gettext("EXPORTED"));
235 case POOL_STATE_DESTROYED:
236 return (gettext("DESTROYED"));
237 case POOL_STATE_SPARE:
238 return (gettext("SPARE"));
239 case POOL_STATE_L2CACHE:
240 return (gettext("L2CACHE"));
241 case POOL_STATE_UNINITIALIZED:
242 return (gettext("UNINITIALIZED"));
243 case POOL_STATE_UNAVAIL:
244 return (gettext("UNAVAIL"));
245 case POOL_STATE_POTENTIALLY_ACTIVE:
246 return (gettext("POTENTIALLY_ACTIVE"));
247 }
248
249 return (gettext("UNKNOWN"));
250 }
251
252 /*
253 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
254 * "SUSPENDED", etc).
255 */
256 const char *
zpool_get_state_str(zpool_handle_t * zhp)257 zpool_get_state_str(zpool_handle_t *zhp)
258 {
259 zpool_errata_t errata;
260 zpool_status_t status;
261 const char *str;
262
263 status = zpool_get_status(zhp, NULL, &errata);
264
265 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
266 str = gettext("FAULTED");
267 } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
268 status == ZPOOL_STATUS_IO_FAILURE_CONTINUE ||
269 status == ZPOOL_STATUS_IO_FAILURE_MMP) {
270 str = gettext("SUSPENDED");
271 } else {
272 nvlist_t *nvroot = fnvlist_lookup_nvlist(
273 zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE);
274 uint_t vsc;
275 vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
276 nvroot, ZPOOL_CONFIG_VDEV_STATS, &vsc);
277 str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
278 }
279 return (str);
280 }
281
282 /*
283 * Get a zpool property value for 'prop' and return the value in
284 * a pre-allocated buffer.
285 */
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 *
 * 'buf'/'len' receive the formatted value.  'srctype', if non-NULL,
 * receives the property source.  When 'literal' is set, numeric values are
 * printed raw instead of humanized (nicenum / percentage / "none").
 * Returns 0 on success, -1 on failure.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	/*
	 * For an unavailable pool only a handful of properties can still be
	 * reported; everything else is rendered as "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			/* These may still be readable from the cache. */
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			zfs_fallthrough;
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	/*
	 * ZPOOL_PROP_DEDUPCACHED can be fetched by name only using
	 * the ZPOOL_GET_PROPS_NAMES mechanism
	 */
	if (prop == ZPOOL_PROP_DEDUPCACHED) {
		zpool_add_propname(zhp, ZPOOL_DEDUPCACHED_PROP_NAME);
		(void) zpool_props_refresh(zhp);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_DEDUP_TABLE_QUOTA:
			/*
			 * If dedup quota is 0, we translate this into 'none'
			 * (unless literal is set). And if it is UINT64_MAX
			 * we translate that as 'automatic' (limit to size of
			 * the dedicated dedup VDEV). Otherwise, fall through
			 * into the regular number formatting.
			 */
			if (intval == 0) {
				(void) strlcpy(buf, literal ? "0" : "none",
				    len);
				break;
			} else if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "auto", len);
				break;
			}
			zfs_fallthrough;

		/* Byte counts: humanized via zfs_nicenum() unless literal. */
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_NORMAL_SIZE:
		case ZPOOL_PROP_SPECIAL_SIZE:
		case ZPOOL_PROP_DEDUP_SIZE:
		case ZPOOL_PROP_LOG_SIZE:
		case ZPOOL_PROP_ELOG_SIZE:
		case ZPOOL_PROP_SELOG_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_NORMAL_ALLOCATED:
		case ZPOOL_PROP_SPECIAL_ALLOCATED:
		case ZPOOL_PROP_DEDUP_ALLOCATED:
		case ZPOOL_PROP_LOG_ALLOCATED:
		case ZPOOL_PROP_ELOG_ALLOCATED:
		case ZPOOL_PROP_SELOG_ALLOCATED:
		case ZPOOL_PROP_AVAILABLE:
		case ZPOOL_PROP_NORMAL_AVAILABLE:
		case ZPOOL_PROP_SPECIAL_AVAILABLE:
		case ZPOOL_PROP_DEDUP_AVAILABLE:
		case ZPOOL_PROP_LOG_AVAILABLE:
		case ZPOOL_PROP_ELOG_AVAILABLE:
		case ZPOOL_PROP_SELOG_AVAILABLE:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_NORMAL_FREE:
		case ZPOOL_PROP_SPECIAL_FREE:
		case ZPOOL_PROP_DEDUP_FREE:
		case ZPOOL_PROP_LOG_FREE:
		case ZPOOL_PROP_ELOG_FREE:
		case ZPOOL_PROP_SELOG_FREE:
		case ZPOOL_PROP_USABLE:
		case ZPOOL_PROP_NORMAL_USABLE:
		case ZPOOL_PROP_SPECIAL_USABLE:
		case ZPOOL_PROP_DEDUP_USABLE:
		case ZPOOL_PROP_LOG_USABLE:
		case ZPOOL_PROP_ELOG_USABLE:
		case ZPOOL_PROP_SELOG_USABLE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_NORMAL_USED:
		case ZPOOL_PROP_SPECIAL_USED:
		case ZPOOL_PROP_DEDUP_USED:
		case ZPOOL_PROP_LOG_USED:
		case ZPOOL_PROP_ELOG_USED:
		case ZPOOL_PROP_SELOG_USED:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
		case ZPOOL_PROP_MAXBLOCKSIZE:
		case ZPOOL_PROP_MAXDNODESIZE:
		case ZPOOL_PROP_BCLONESAVED:
		case ZPOOL_PROP_BCLONEUSED:
		case ZPOOL_PROP_DEDUP_TABLE_SIZE:
		case ZPOOL_PROP_DEDUPUSED:
		case ZPOOL_PROP_DEDUPSAVED:
		case ZPOOL_PROP_DEDUPCACHED:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		/* Zero means "not applicable" and prints as "-". */
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_NORMAL_EXPANDSZ:
		case ZPOOL_PROP_SPECIAL_EXPANDSZ:
		case ZPOOL_PROP_DEDUP_EXPANDSZ:
		case ZPOOL_PROP_LOG_EXPANDSZ:
		case ZPOOL_PROP_ELOG_EXPANDSZ:
		case ZPOOL_PROP_SELOG_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		/* Percentages: append '%' unless literal. */
		case ZPOOL_PROP_CAPACITY:
		case ZPOOL_PROP_NORMAL_CAPACITY:
		case ZPOOL_PROP_SPECIAL_CAPACITY:
		case ZPOOL_PROP_DEDUP_CAPACITY:
		case ZPOOL_PROP_LOG_CAPACITY:
		case ZPOOL_PROP_ELOG_CAPACITY:
		case ZPOOL_PROP_SELOG_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
		case ZPOOL_PROP_NORMAL_FRAGMENTATION:
		case ZPOOL_PROP_SPECIAL_FRAGMENTATION:
		case ZPOOL_PROP_DEDUP_FRAGMENTATION:
		case ZPOOL_PROP_LOG_FRAGMENTATION:
		case ZPOOL_PROP_ELOG_FRAGMENTATION:
		case ZPOOL_PROP_SELOG_FRAGMENTATION:
			if (intval == ZFS_FRAG_INVALID) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		/* Ratios stored as value*100; print with two decimals. */
		case ZPOOL_PROP_BCLONERATIO:
		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			/* Feature-flag pools show "-" instead of a number. */
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			zfs_fallthrough;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
527
528 /*
529 * Get a zpool property value for 'propname' and return the value in
530 * a pre-allocated buffer.
531 */
532 int
zpool_get_userprop(zpool_handle_t * zhp,const char * propname,char * buf,size_t len,zprop_source_t * srctype)533 zpool_get_userprop(zpool_handle_t *zhp, const char *propname, char *buf,
534 size_t len, zprop_source_t *srctype)
535 {
536 nvlist_t *nv;
537 uint64_t ival;
538 const char *value;
539 zprop_source_t source = ZPROP_SRC_LOCAL;
540
541 if (zhp->zpool_props == NULL)
542 zpool_get_all_props(zhp);
543
544 if (nvlist_lookup_nvlist(zhp->zpool_props, propname, &nv) == 0) {
545 if (nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0)
546 source = ival;
547 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
548 } else {
549 source = ZPROP_SRC_DEFAULT;
550 value = "-";
551 }
552
553 if (srctype)
554 *srctype = source;
555
556 (void) strlcpy(buf, value, len);
557
558 return (0);
559 }
560
561 /*
562 * Check if the bootfs name has the same pool name as it is set to.
563 * Assuming bootfs is a valid dataset name.
564 */
565 static boolean_t
bootfs_name_valid(const char * pool,const char * bootfs)566 bootfs_name_valid(const char *pool, const char *bootfs)
567 {
568 int len = strlen(pool);
569 if (bootfs[0] == '\0')
570 return (B_TRUE);
571
572 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
573 return (B_FALSE);
574
575 if (strncmp(pool, bootfs, len) == 0 &&
576 (bootfs[len] == '/' || bootfs[len] == '\0'))
577 return (B_TRUE);
578
579 return (B_FALSE);
580 }
581
582 /*
583 * Given an nvlist of zpool properties to be set, validate that they are
584 * correct, and parse any numeric properties (index, boolean, etc) if they are
585 * specified as strings.
586 */
587 static nvlist_t *
zpool_valid_proplist(libzfs_handle_t * hdl,const char * poolname,nvlist_t * props,uint64_t version,prop_flags_t flags,char * errbuf)588 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
589 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
590 {
591 nvpair_t *elem;
592 nvlist_t *retprops;
593 zpool_prop_t prop;
594 const char *strval;
595 uint64_t intval;
596 const char *check;
597 struct stat64 statbuf;
598 zpool_handle_t *zhp;
599 char *parent, *slash;
600 char report[1024];
601
602 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
603 (void) no_memory(hdl);
604 return (NULL);
605 }
606
607 elem = NULL;
608 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
609 const char *propname = nvpair_name(elem);
610
611 if (flags.vdevprop && zpool_prop_vdev(propname)) {
612 vdev_prop_t vprop = vdev_name_to_prop(propname);
613
614 if (vdev_prop_readonly(vprop)) {
615 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
616 "is readonly"), propname);
617 (void) zfs_error(hdl, EZFS_PROPREADONLY,
618 errbuf);
619 goto error;
620 }
621
622 if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
623 retprops, &strval, &intval, errbuf) != 0)
624 goto error;
625
626 continue;
627 } else if (flags.vdevprop && vdev_prop_user(propname)) {
628 if (nvlist_add_nvpair(retprops, elem) != 0) {
629 (void) no_memory(hdl);
630 goto error;
631 }
632 continue;
633 } else if (flags.vdevprop) {
634 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
635 "invalid property: '%s'"), propname);
636 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
637 goto error;
638 }
639
640 prop = zpool_name_to_prop(propname);
641 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
642 int err;
643 char *fname = strchr(propname, '@') + 1;
644
645 err = zfeature_lookup_name(fname, NULL);
646 if (err != 0) {
647 ASSERT3U(err, ==, ENOENT);
648 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
649 "feature '%s' unsupported by kernel"),
650 fname);
651 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
652 goto error;
653 }
654
655 if (nvpair_type(elem) != DATA_TYPE_STRING) {
656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
657 "'%s' must be a string"), propname);
658 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
659 goto error;
660 }
661
662 (void) nvpair_value_string(elem, &strval);
663 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
664 strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
665 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
666 "property '%s' can only be set to "
667 "'enabled' or 'disabled'"), propname);
668 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
669 goto error;
670 }
671
672 if (!flags.create &&
673 strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
674 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
675 "property '%s' can only be set to "
676 "'disabled' at creation time"), propname);
677 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
678 goto error;
679 }
680
681 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
682 (void) no_memory(hdl);
683 goto error;
684 }
685 continue;
686 } else if (prop == ZPOOL_PROP_INVAL &&
687 zfs_prop_user(propname)) {
688 /*
689 * This is a user property: make sure it's a
690 * string, and that it's less than ZAP_MAXNAMELEN.
691 */
692 if (nvpair_type(elem) != DATA_TYPE_STRING) {
693 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
694 "'%s' must be a string"), propname);
695 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
696 goto error;
697 }
698
699 if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
700 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
701 "property name '%s' is too long"),
702 propname);
703 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
704 goto error;
705 }
706
707 (void) nvpair_value_string(elem, &strval);
708
709 if (strlen(strval) >= ZFS_MAXPROPLEN) {
710 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
711 "property value '%s' is too long"),
712 strval);
713 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
714 goto error;
715 }
716
717 if (nvlist_add_string(retprops, propname,
718 strval) != 0) {
719 (void) no_memory(hdl);
720 goto error;
721 }
722
723 continue;
724 }
725
726 /*
727 * Make sure this property is valid and applies to this type.
728 */
729 if (prop == ZPOOL_PROP_INVAL) {
730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
731 "invalid property '%s'"), propname);
732 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
733 goto error;
734 }
735
736 if (zpool_prop_readonly(prop)) {
737 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
738 "is readonly"), propname);
739 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
740 goto error;
741 }
742
743 if (!flags.create && zpool_prop_setonce(prop)) {
744 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
745 "property '%s' can only be set at "
746 "creation time"), propname);
747 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
748 goto error;
749 }
750
751 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
752 &strval, &intval, errbuf) != 0)
753 goto error;
754
755 /*
756 * Perform additional checking for specific properties.
757 */
758 switch (prop) {
759 case ZPOOL_PROP_VERSION:
760 if (intval < version ||
761 !SPA_VERSION_IS_SUPPORTED(intval)) {
762 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
763 "property '%s' number %llu is invalid."),
764 propname, (unsigned long long)intval);
765 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
766 goto error;
767 }
768 break;
769
770 case ZPOOL_PROP_ASHIFT:
771 if (intval != 0 &&
772 (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
773 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
774 "property '%s' number %llu is invalid, "
775 "only values between %" PRId32 " and %"
776 PRId32 " are allowed."),
777 propname, (unsigned long long)intval,
778 ASHIFT_MIN, ASHIFT_MAX);
779 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
780 goto error;
781 }
782 break;
783
784 case ZPOOL_PROP_BOOTFS:
785 if (flags.create || flags.import) {
786 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
787 "property '%s' cannot be set at creation "
788 "or import time"), propname);
789 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
790 goto error;
791 }
792
793 if (version < SPA_VERSION_BOOTFS) {
794 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
795 "pool must be upgraded to support "
796 "'%s' property"), propname);
797 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
798 goto error;
799 }
800
801 /*
802 * bootfs property value has to be a dataset name and
803 * the dataset has to be in the same pool as it sets to.
804 */
805 if (!bootfs_name_valid(poolname, strval)) {
806 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
807 "is an invalid name"), strval);
808 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
809 goto error;
810 }
811
812 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
813 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
814 "could not open pool '%s'"), poolname);
815 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
816 goto error;
817 }
818 zpool_close(zhp);
819 break;
820
821 case ZPOOL_PROP_ALTROOT:
822 if (!flags.create && !flags.import) {
823 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
824 "property '%s' can only be set during pool "
825 "creation or import"), propname);
826 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
827 goto error;
828 }
829
830 if (strval[0] != '/') {
831 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
832 "bad alternate root '%s'"), strval);
833 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
834 goto error;
835 }
836 break;
837
838 case ZPOOL_PROP_CACHEFILE:
839 if (strval[0] == '\0')
840 break;
841
842 if (strcmp(strval, "none") == 0)
843 break;
844
845 if (strval[0] != '/') {
846 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
847 "property '%s' must be empty, an "
848 "absolute path, or 'none'"), propname);
849 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
850 goto error;
851 }
852
853 parent = strdup(strval);
854 if (parent == NULL) {
855 (void) zfs_error(hdl, EZFS_NOMEM, errbuf);
856 goto error;
857 }
858 slash = strrchr(parent, '/');
859
860 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
861 strcmp(slash, "/..") == 0) {
862 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
863 "'%s' is not a valid file"), parent);
864 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
865 free(parent);
866 goto error;
867 }
868
869 *slash = '\0';
870
871 if (parent[0] != '\0' &&
872 (stat64(parent, &statbuf) != 0 ||
873 !S_ISDIR(statbuf.st_mode))) {
874 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
875 "'%s' is not a valid directory"),
876 parent);
877 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
878 free(parent);
879 goto error;
880 }
881 free(parent);
882
883 break;
884
885 case ZPOOL_PROP_COMPATIBILITY:
886 switch (zpool_load_compat(strval, NULL, report, 1024)) {
887 case ZPOOL_COMPATIBILITY_OK:
888 case ZPOOL_COMPATIBILITY_WARNTOKEN:
889 break;
890 case ZPOOL_COMPATIBILITY_BADFILE:
891 case ZPOOL_COMPATIBILITY_BADTOKEN:
892 case ZPOOL_COMPATIBILITY_NOFILES:
893 zfs_error_aux(hdl, "%s", report);
894 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
895 goto error;
896 }
897 break;
898
899 case ZPOOL_PROP_COMMENT:
900 for (check = strval; *check != '\0'; check++) {
901 if (!isprint(*check)) {
902 zfs_error_aux(hdl,
903 dgettext(TEXT_DOMAIN,
904 "comment may only have printable "
905 "characters"));
906 (void) zfs_error(hdl, EZFS_BADPROP,
907 errbuf);
908 goto error;
909 }
910 }
911 if (strlen(strval) > ZPROP_MAX_COMMENT) {
912 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
913 "comment must not exceed %d characters"),
914 ZPROP_MAX_COMMENT);
915 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
916 goto error;
917 }
918 break;
919 case ZPOOL_PROP_READONLY:
920 if (!flags.import) {
921 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
922 "property '%s' can only be set at "
923 "import time"), propname);
924 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
925 goto error;
926 }
927 break;
928 case ZPOOL_PROP_MULTIHOST:
929 if (get_system_hostid() == 0) {
930 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
931 "requires a non-zero system hostid"));
932 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
933 goto error;
934 }
935 break;
936 case ZPOOL_PROP_DEDUPDITTO:
937 printf("Note: property '%s' no longer has "
938 "any effect\n", propname);
939 break;
940
941 default:
942 break;
943 }
944 }
945
946 return (retprops);
947 error:
948 nvlist_free(retprops);
949 return (NULL);
950 }
951
952 /*
953 * Set zpool property : propname=propval.
954 */
955 int
zpool_set_prop(zpool_handle_t * zhp,const char * propname,const char * propval)956 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
957 {
958 zfs_cmd_t zc = {"\0"};
959 int ret;
960 char errbuf[ERRBUFLEN];
961 nvlist_t *nvl = NULL;
962 nvlist_t *realprops;
963 uint64_t version;
964 prop_flags_t flags = { 0 };
965
966 (void) snprintf(errbuf, sizeof (errbuf),
967 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
968 zhp->zpool_name);
969
970 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
971 return (no_memory(zhp->zpool_hdl));
972
973 if (nvlist_add_string(nvl, propname, propval) != 0) {
974 nvlist_free(nvl);
975 return (no_memory(zhp->zpool_hdl));
976 }
977
978 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
979 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
980 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
981 nvlist_free(nvl);
982 return (-1);
983 }
984
985 nvlist_free(nvl);
986 nvl = realprops;
987
988 /*
989 * Execute the corresponding ioctl() to set this property.
990 */
991 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
992
993 zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl);
994
995 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
996
997 zcmd_free_nvlists(&zc);
998 nvlist_free(nvl);
999
1000 if (ret)
1001 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
1002 else
1003 (void) zpool_props_refresh(zhp);
1004
1005 return (ret);
1006 }
1007
/*
 * Expand the property list '*plp' for this pool: append user properties,
 * feature@ properties (on first expansion of an "all" list), and any
 * unsupported@ features reported by the pool, then widen each entry's
 * pl_width to fit the pool's current values.  Returns 0 on success, -1 on
 * failure.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	/* Vdev property lists get no pool-specific additions. */
	if (type == ZFS_TYPE_VDEV)
		return (0);

	/* Find the tail of the list so new entries append in order. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		/* Handle userprops in the all properties case */
		if (zhp->zpool_props == NULL && zpool_props_refresh(zhp))
			return (-1);

		nvp = NULL;
		while ((nvp = nvlist_next_nvpair(zhp->zpool_props, nvp)) !=
		    NULL) {
			const char *propname = nvpair_name(nvp);

			if (!zfs_prop_user(propname))
				continue;

			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_strdup(hdl, propname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}

		/* Append one feature@<name> entry per known feature. */
		for (i = 0; i < SPA_FEATURES; i++) {
			entry = zfs_alloc(hdl, sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_USERPROP;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_USERPROP;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen each column to fit this pool's current property values. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_USERPROP &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		} else if (entry->pl_prop == ZPROP_INVAL &&
		    zfs_prop_user(entry->pl_user_prop) &&
		    zpool_get_userprop(zhp, entry->pl_user_prop, buf,
		    sizeof (buf), NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
1131
/*
 * Expand the vdev property list for the named vdev: widen the display
 * column of every non-fixed property already in *plp to fit this vdev's
 * values, and when the list was built with "all properties" requested
 * (tail entry has pl_all set), append one entry per user-defined vdev
 * property present on the vdev.
 *
 * Returns 0 on success, or the error from zpool_get_all_vdev_props().
 */
int
vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
    zprop_list_t **plp)
{
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	const char *strval = NULL;
	int err = 0;
	nvpair_t *elem = NULL;
	nvlist_t *vprops = NULL;
	nvlist_t *propval = NULL;
	const char *propname;
	vdev_prop_t prop;
	zprop_list_t **last;

	/* Pass 1: grow existing column widths to fit this vdev. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed)
			continue;

		if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
		    entry->pl_user_prop, buf, sizeof (buf), NULL,
		    B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
		/* The NAME column must also fit the vdev name itself. */
		if (entry->pl_prop == VDEV_PROP_NAME &&
		    strlen(vdevname) > entry->pl_width)
			entry->pl_width = strlen(vdevname);
	}

	/* Handle the all properties case */
	last = plp;
	if (*last != NULL && (*last)->pl_all == B_TRUE) {
		/* Walk to the tail so new entries append in order. */
		while (*last != NULL)
			last = &(*last)->pl_next;

		err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
		if (err != 0)
			return (err);

		while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
			propname = nvpair_name(elem);

			/* Skip properties that are not user defined */
			if ((prop = vdev_name_to_prop(propname)) !=
			    VDEV_PROP_USERPROP)
				continue;

			if (nvpair_value_nvlist(elem, &propval) != 0)
				continue;

			strval = fnvlist_lookup_string(propval, ZPROP_VALUE);

			entry = zfs_alloc(zhp->zpool_hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = prop;
			entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
			    propname);
			entry->pl_width = strlen(strval);
			entry->pl_all = B_TRUE;
			*last = entry;
			last = &entry->pl_next;
		}
	}

	return (0);
}
1199
1200 /*
1201 * Get the state for the given feature on the given ZFS pool.
1202 */
1203 int
zpool_prop_get_feature(zpool_handle_t * zhp,const char * propname,char * buf,size_t len)1204 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
1205 size_t len)
1206 {
1207 uint64_t refcount;
1208 boolean_t found = B_FALSE;
1209 nvlist_t *features = zpool_get_features(zhp);
1210 boolean_t supported;
1211 const char *feature = strchr(propname, '@') + 1;
1212
1213 supported = zpool_prop_feature(propname);
1214 ASSERT(supported || zpool_prop_unsupported(propname));
1215
1216 /*
1217 * Convert from feature name to feature guid. This conversion is
1218 * unnecessary for unsupported@... properties because they already
1219 * use guids.
1220 */
1221 if (supported) {
1222 int ret;
1223 spa_feature_t fid;
1224
1225 ret = zfeature_lookup_name(feature, &fid);
1226 if (ret != 0) {
1227 (void) strlcpy(buf, "-", len);
1228 return (ENOTSUP);
1229 }
1230 feature = spa_feature_table[fid].fi_guid;
1231 }
1232
1233 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
1234 found = B_TRUE;
1235
1236 if (supported) {
1237 if (!found) {
1238 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
1239 } else {
1240 if (refcount == 0)
1241 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
1242 else
1243 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
1244 }
1245 } else {
1246 if (found) {
1247 if (refcount == 0) {
1248 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
1249 } else {
1250 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
1251 }
1252 } else {
1253 (void) strlcpy(buf, "-", len);
1254 return (ENOTSUP);
1255 }
1256 }
1257
1258 return (0);
1259 }
1260
1261 /*
1262 * Validate the given pool name, optionally putting an extended error message in
1263 * 'buf'.
1264 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;	/* failure class from pool_namecheck() */
	char what;		/* offending character, if any */
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 *
	 * Note: "mirror"/"raidz"/"draid"/"spare" are rejected as prefixes
	 * (strncmp), while "log" is rejected only as an exact match.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		/*
		 * Map the namecheck failure code to a human-readable aux
		 * message on the handle (when one was supplied).
		 */
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
1359
1360 /*
1361 * Open a handle to the given pool, even if the pool is currently in the FAULTED
1362 * state.
1363 */
1364 zpool_handle_t *
zpool_open_canfail(libzfs_handle_t * hdl,const char * pool)1365 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1366 {
1367 zpool_handle_t *zhp;
1368 boolean_t missing;
1369
1370 /*
1371 * Make sure the pool name is valid.
1372 */
1373 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1374 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1375 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1376 pool);
1377 return (NULL);
1378 }
1379
1380 zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
1381
1382 zhp->zpool_hdl = hdl;
1383 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1384
1385 if (zpool_refresh_stats(zhp, &missing) != 0) {
1386 zpool_close(zhp);
1387 return (NULL);
1388 }
1389
1390 if (missing) {
1391 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1392 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1393 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1394 zpool_close(zhp);
1395 return (NULL);
1396 }
1397
1398 return (zhp);
1399 }
1400
1401 /*
1402 * Like the above, but silent on error. Used when iterating over pools (because
1403 * the configuration cache may be out of date).
1404 */
1405 int
zpool_open_silent(libzfs_handle_t * hdl,const char * pool,zpool_handle_t ** ret)1406 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1407 {
1408 zpool_handle_t *zhp;
1409 boolean_t missing;
1410
1411 zhp = zfs_alloc(hdl, sizeof (zpool_handle_t));
1412
1413 zhp->zpool_hdl = hdl;
1414 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1415
1416 if (zpool_refresh_stats(zhp, &missing) != 0) {
1417 zpool_close(zhp);
1418 return (-1);
1419 }
1420
1421 if (missing) {
1422 zpool_close(zhp);
1423 *ret = NULL;
1424 return (0);
1425 }
1426
1427 *ret = zhp;
1428 return (0);
1429 }
1430
1431 /*
1432 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1433 * state.
1434 */
1435 zpool_handle_t *
zpool_open(libzfs_handle_t * hdl,const char * pool)1436 zpool_open(libzfs_handle_t *hdl, const char *pool)
1437 {
1438 zpool_handle_t *zhp;
1439
1440 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1441 return (NULL);
1442
1443 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1444 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1445 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1446 zpool_close(zhp);
1447 return (NULL);
1448 }
1449
1450 return (zhp);
1451 }
1452
1453 /*
1454 * Close the handle. Simply frees the memory associated with the handle.
1455 */
1456 void
zpool_close(zpool_handle_t * zhp)1457 zpool_close(zpool_handle_t *zhp)
1458 {
1459 nvlist_free(zhp->zpool_config);
1460 nvlist_free(zhp->zpool_old_config);
1461 nvlist_free(zhp->zpool_props);
1462 free(zhp);
1463 }
1464
1465 /*
1466 * Return the name of the pool.
1467 */
1468 const char *
zpool_get_name(zpool_handle_t * zhp)1469 zpool_get_name(zpool_handle_t *zhp)
1470 {
1471 return (zhp->zpool_name);
1472 }
1473
1474
1475 /*
1476 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1477 */
1478 int
zpool_get_state(zpool_handle_t * zhp)1479 zpool_get_state(zpool_handle_t *zhp)
1480 {
1481 return (zhp->zpool_state);
1482 }
1483
1484 /*
1485 * Check if vdev list contains a dRAID vdev
1486 */
1487 static boolean_t
zpool_has_draid_vdev(nvlist_t * nvroot)1488 zpool_has_draid_vdev(nvlist_t *nvroot)
1489 {
1490 nvlist_t **child;
1491 uint_t children;
1492
1493 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1494 &child, &children) == 0) {
1495 for (uint_t c = 0; c < children; c++) {
1496 const char *type;
1497
1498 if (nvlist_lookup_string(child[c],
1499 ZPOOL_CONFIG_TYPE, &type) == 0 &&
1500 strcmp(type, VDEV_TYPE_DRAID) == 0) {
1501 return (B_TRUE);
1502 }
1503 }
1504 }
1505 return (B_FALSE);
1506 }
1507
1508 /*
1509 * Output a dRAID top-level vdev name in to the provided buffer.
1510 */
1511 static char *
zpool_draid_name(char * name,int len,uint64_t data,uint64_t parity,uint64_t spares,uint64_t children,uint64_t width)1512 zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
1513 uint64_t spares, uint64_t children, uint64_t width)
1514 {
1515 if (children < width)
1516 snprintf(name, len, "%s%llu:%llud:%lluc:%lluw:%llus",
1517 VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
1518 (u_longlong_t)children, (u_longlong_t)width,
1519 (u_longlong_t)spares);
1520 else
1521 snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
1522 VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
1523 (u_longlong_t)children, (u_longlong_t)spares);
1524
1525 return (name);
1526 }
1527
1528 /*
1529 * Return B_TRUE if the provided name is a dRAID spare name.
1530 */
1531 boolean_t
zpool_is_draid_spare(const char * name)1532 zpool_is_draid_spare(const char *name)
1533 {
1534 uint64_t spare_id, parity, vdev_id;
1535
1536 if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
1537 (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
1538 (u_longlong_t *)&spare_id) == 3) {
1539 return (B_TRUE);
1540 }
1541
1542 return (B_FALSE);
1543 }
1544
1545
1546 /*
1547 * Extract device-specific error information from a failed pool creation.
1548 * If the kernel returned ZPOOL_CONFIG_CREATE_INFO in the ioctl output,
1549 * set an appropriate error aux message identifying the problematic device.
1550 */
1551 static int
zpool_create_info(libzfs_handle_t * hdl,zfs_cmd_t * zc)1552 zpool_create_info(libzfs_handle_t *hdl, zfs_cmd_t *zc)
1553 {
1554 nvlist_t *outnv = NULL;
1555 nvlist_t *info = NULL;
1556 const char *vdev = NULL;
1557 const char *pname = NULL;
1558
1559 if (zc->zc_nvlist_dst_size == 0)
1560 return (ENOENT);
1561
1562 if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
1563 zc->zc_nvlist_dst_size, &outnv, 0) != 0 || outnv == NULL)
1564 return (EINVAL);
1565
1566 if (nvlist_lookup_nvlist(outnv, ZPOOL_CONFIG_CREATE_INFO, &info) != 0) {
1567 nvlist_free(outnv);
1568 return (EINVAL);
1569 }
1570
1571 if (nvlist_lookup_string(info, ZPOOL_CREATE_INFO_VDEV, &vdev) != 0) {
1572 nvlist_free(outnv);
1573 return (EINVAL);
1574 }
1575
1576 if (nvlist_lookup_string(info, ZPOOL_CREATE_INFO_POOL, &pname) == 0) {
1577 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1578 "device '%s' is part of active pool '%s'"),
1579 vdev, pname);
1580 } else {
1581 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1582 "device '%s' is in use"), vdev);
1583 }
1584
1585 nvlist_free(outnv);
1586 return (0);
1587 }
1588
1589 /*
1590 * Create the named pool, using the provided vdev list. It is assumed
1591 * that the consumer has already validated the contents of the nvlist, so we
1592 * don't have to worry about error semantics.
1593 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;	/* validated root-dataset props */
	nvlist_t *zc_props = NULL;	/* validated pool props + extras */
	nvlist_t *hidden_args = NULL;	/* wrapping key, kept out of history */
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char errbuf[ERRBUFLEN];
	int ret = -1;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);

	/* Validate pool-level properties, if any were supplied. */
	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, errbuf)) == NULL) {
			goto create_failed;
		}
	}

	/* Validate root-dataset properties and set up encryption, if any. */
	if (fsprops) {
		uint64_t zoned;
		const char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, errbuf)) == NULL) {
			goto create_failed;
		}

		/* Ensure zc_props exists so the fs props can be attached. */
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		/* The wrapping key rides along in a separate hidden nvlist. */
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props)
		zcmd_write_src_nvlist(hdl, &zc, zc_props);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	/* Output buffer for ZPOOL_CONFIG_CREATE_INFO on failure. */
	zcmd_alloc_dst_nvlist(hdl, &zc, 4096);

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
		/* Translate errno into a user-facing diagnostic. */
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen under if the device is
			 * part of an active md or lvm device.
			 */
			if (zpool_create_info(hdl, &zc) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more vdevs refer to the same "
				    "device, or one of\nthe devices is "
				    "part of an active md or lvm device"));
			}
			ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EINVAL:
			/* Distinguish a missing dRAID feature from the rest. */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
				ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			} else {
				ret = zpool_standard_error(hdl, errno, errbuf);
			}
			break;

		case ENXIO:
			/* Prefer the kernel's per-device detail if present. */
			if (zpool_create_info(hdl, &zc) == 0) {
				ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices could not be "
				    "opened"));
				ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			}
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "block size out of range or does not match"));
			ret = zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		default:
			ret = zpool_standard_error(hdl, errno, errbuf);
			break;
		}
	}

create_failed:
	/* Common cleanup for both success and failure paths. */
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}
1775
1776 /*
1777 * Destroy the given pool. It is up to the caller to ensure that there are no
1778 * datasets left in the pool.
1779 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];

	/*
	 * For an active pool, open the root filesystem first so its
	 * mountpoint can be cleaned up after a successful destroy.
	 */
	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* log_str is recorded in the pool history by the kernel. */
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	/* Destroy succeeded: remove the stale mountpoint directory. */
	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
1819
1820 /*
1821 * Create a checkpoint in the given pool.
1822 */
1823 int
zpool_checkpoint(zpool_handle_t * zhp)1824 zpool_checkpoint(zpool_handle_t *zhp)
1825 {
1826 libzfs_handle_t *hdl = zhp->zpool_hdl;
1827 char errbuf[ERRBUFLEN];
1828 int error;
1829
1830 error = lzc_pool_checkpoint(zhp->zpool_name);
1831 if (error != 0) {
1832 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1833 "cannot checkpoint '%s'"), zhp->zpool_name);
1834 (void) zpool_standard_error(hdl, error, errbuf);
1835 return (-1);
1836 }
1837
1838 return (0);
1839 }
1840
1841 /*
1842 * Discard the checkpoint from the given pool.
1843 */
1844 int
zpool_discard_checkpoint(zpool_handle_t * zhp)1845 zpool_discard_checkpoint(zpool_handle_t *zhp)
1846 {
1847 libzfs_handle_t *hdl = zhp->zpool_hdl;
1848 char errbuf[ERRBUFLEN];
1849 int error;
1850
1851 error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1852 if (error != 0) {
1853 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1854 "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1855 (void) zpool_standard_error(hdl, error, errbuf);
1856 return (-1);
1857 }
1858
1859 return (0);
1860 }
1861
1862 /*
1863 * Load data type for the given pool.
1864 */
1865 int
zpool_prefetch(zpool_handle_t * zhp,zpool_prefetch_type_t type)1866 zpool_prefetch(zpool_handle_t *zhp, zpool_prefetch_type_t type)
1867 {
1868 libzfs_handle_t *hdl = zhp->zpool_hdl;
1869 char msg[1024];
1870 int error;
1871
1872 error = lzc_pool_prefetch(zhp->zpool_name, type);
1873 if (error != 0) {
1874 const char *typename = "unknown";
1875 if (type == ZPOOL_PREFETCH_DDT)
1876 typename = "ddt";
1877 else if (type == ZPOOL_PREFETCH_BRT)
1878 typename = "brt";
1879 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1880 "cannot prefetch %s in '%s'"), typename, zhp->zpool_name);
1881 (void) zpool_standard_error(hdl, error, msg);
1882 return (-1);
1883 }
1884
1885 return (0);
1886 }
1887
1888 /*
1889 * Add the given vdevs to the pool. The caller must have already performed the
1890 * necessary verification to ensure that the vdev specification is well-formed.
1891 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot, boolean_t check_ashift)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char errbuf[ERRBUFLEN];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/*
	 * Hot spares and cache devices require minimum SPA versions;
	 * reject such additions up front if the pool is too old.
	 */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	zcmd_write_conf_nvlist(hdl, &zc, nvroot);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	/* Nonzero asks the kernel to verify the new vdevs' ashift. */
	zc.zc_flags = check_ashift;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		/* Translate errno into a user-facing diagnostic. */
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EINVAL:

			/* Distinguish a missing dRAID feature from the rest. */
			if (zpool_has_draid_vdev(nvroot) &&
			    zfeature_lookup_name("draid", NULL) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "dRAID vdevs are unsupported by the "
				    "kernel"));
			} else {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid config; a pool with removing/"
				    "removed vdevs does not support adding "
				    "raidz or dRAID vdevs"));
			}

			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, errbuf);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
1997
1998 /*
1999 * Exports the pool from the system. The caller must ensure that there are no
2000 * mounted datasets in the pool.
2001 */
2002 static int
zpool_export_common(zpool_handle_t * zhp,boolean_t force,boolean_t hardforce,const char * log_str)2003 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
2004 const char *log_str)
2005 {
2006 zfs_cmd_t zc = {"\0"};
2007
2008 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2009 zc.zc_cookie = force;
2010 zc.zc_guid = hardforce;
2011 zc.zc_history = (uint64_t)(uintptr_t)log_str;
2012
2013 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
2014 switch (errno) {
2015 case EXDEV:
2016 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2017 "use '-f' to override the following errors:\n"
2018 "'%s' has an active shared spare which could be"
2019 " used by other pools once '%s' is exported."),
2020 zhp->zpool_name, zhp->zpool_name);
2021 return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
2022 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
2023 zhp->zpool_name));
2024 default:
2025 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
2026 dgettext(TEXT_DOMAIN, "cannot export '%s'"),
2027 zhp->zpool_name));
2028 }
2029 }
2030
2031 return (0);
2032 }
2033
2034 int
zpool_export(zpool_handle_t * zhp,boolean_t force,const char * log_str)2035 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
2036 {
2037 return (zpool_export_common(zhp, force, B_FALSE, log_str));
2038 }
2039
2040 int
zpool_export_force(zpool_handle_t * zhp,const char * log_str)2041 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
2042 {
2043 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
2044 }
2045
2046 static void
zpool_rewind_exclaim(libzfs_handle_t * hdl,const char * name,boolean_t dryrun,nvlist_t * config)2047 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
2048 nvlist_t *config)
2049 {
2050 nvlist_t *nv = NULL;
2051 uint64_t rewindto;
2052 int64_t loss = -1;
2053 struct tm t;
2054 char timestr[128];
2055
2056 if (!hdl->libzfs_printerr || config == NULL)
2057 return;
2058
2059 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
2060 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
2061 return;
2062 }
2063
2064 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
2065 return;
2066 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
2067
2068 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
2069 ctime_r((time_t *)&rewindto, timestr) != NULL) {
2070 timestr[24] = 0;
2071 if (dryrun) {
2072 (void) printf(dgettext(TEXT_DOMAIN,
2073 "Would be able to return %s "
2074 "to its state as of %s.\n"),
2075 name, timestr);
2076 } else {
2077 (void) printf(dgettext(TEXT_DOMAIN,
2078 "Pool %s returned to its state as of %s.\n"),
2079 name, timestr);
2080 }
2081 if (loss > 120) {
2082 (void) printf(dgettext(TEXT_DOMAIN,
2083 "%s approximately %lld "),
2084 dryrun ? "Would discard" : "Discarded",
2085 ((longlong_t)loss + 30) / 60);
2086 (void) printf(dgettext(TEXT_DOMAIN,
2087 "minutes of transactions.\n"));
2088 } else if (loss > 0) {
2089 (void) printf(dgettext(TEXT_DOMAIN,
2090 "%s approximately %lld "),
2091 dryrun ? "Would discard" : "Discarded",
2092 (longlong_t)loss);
2093 (void) printf(dgettext(TEXT_DOMAIN,
2094 "seconds of transactions.\n"));
2095 }
2096 }
2097 }
2098
2099 void
zpool_explain_recover(libzfs_handle_t * hdl,const char * name,int reason,nvlist_t * config,char * buf,size_t size)2100 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
2101 nvlist_t *config, char *buf, size_t size)
2102 {
2103 nvlist_t *nv = NULL;
2104 int64_t loss = -1;
2105 uint64_t edata = UINT64_MAX;
2106 uint64_t rewindto;
2107 struct tm t;
2108 char timestr[128], temp[1024];
2109
2110 if (!hdl->libzfs_printerr)
2111 return;
2112
2113 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
2114 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
2115 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
2116 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
2117 goto no_info;
2118
2119 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
2120 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
2121 &edata);
2122
2123 (void) snprintf(buf, size, dgettext(TEXT_DOMAIN,
2124 "Recovery is possible, but will result in some data loss.\n"));
2125
2126 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
2127 ctime_r((time_t *)&rewindto, timestr) != NULL) {
2128 timestr[24] = 0;
2129 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2130 "\tReturning the pool to its state as of %s\n"
2131 "\tshould correct the problem. "), timestr);
2132 (void) strlcat(buf, temp, size);
2133 } else {
2134 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2135 "\tReverting the pool to an earlier state "
2136 "should correct the problem.\n\t"), size);
2137 }
2138
2139 if (loss > 120) {
2140 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2141 "Approximately %lld minutes of data\n"
2142 "\tmust be discarded, irreversibly. "),
2143 ((longlong_t)loss + 30) / 60);
2144 (void) strlcat(buf, temp, size);
2145 } else if (loss > 0) {
2146 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2147 "Approximately %lld seconds of data\n"
2148 "\tmust be discarded, irreversibly. "),
2149 (longlong_t)loss);
2150 (void) strlcat(buf, temp, size);
2151 }
2152 if (edata != 0 && edata != UINT64_MAX) {
2153 if (edata == 1) {
2154 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2155 "After rewind, at least\n"
2156 "\tone persistent user-data error will remain. "),
2157 size);
2158 } else {
2159 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2160 "After rewind, several\n"
2161 "\tpersistent user-data errors will remain. "),
2162 size);
2163 }
2164 }
2165 (void) snprintf(temp, 1024, dgettext(TEXT_DOMAIN,
2166 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
2167 reason >= 0 ? "clear" : "import", name);
2168 (void) strlcat(buf, temp, size);
2169
2170 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2171 "A scrub of the pool\n"
2172 "\tis strongly recommended after recovery.\n"), size);
2173 return;
2174
2175 no_info:
2176 (void) strlcat(buf, dgettext(TEXT_DOMAIN,
2177 "Ensure all pool devices are present and accessible, then "
2178 "retry the import.\n\tIf the problem persists, destroy and "
2179 "re-create the pool from a backup source.\n"), size);
2180 }
2181
2182 /*
2183 * zpool_import() is a contracted interface. Should be kept the same
2184 * if possible.
2185 *
2186 * Applications should use zpool_import_props() to import a pool with
2187 * new properties value to be set.
2188 */
2189 int
zpool_import(libzfs_handle_t * hdl,nvlist_t * config,const char * newname,char * altroot)2190 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2191 char *altroot)
2192 {
2193 nvlist_t *props = NULL;
2194 int ret;
2195
2196 if (altroot != NULL) {
2197 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
2198 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2199 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2200 newname));
2201 }
2202
2203 if (nvlist_add_string(props,
2204 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
2205 nvlist_add_string(props,
2206 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
2207 nvlist_free(props);
2208 return (zfs_error_fmt(hdl, EZFS_NOMEM,
2209 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2210 newname));
2211 }
2212 }
2213
2214 ret = zpool_import_props(hdl, config, newname, props,
2215 ZFS_IMPORT_NORMAL);
2216 nvlist_free(props);
2217 return (ret);
2218 }
2219
2220 static void
print_vdev_tree(libzfs_handle_t * hdl,const char * name,nvlist_t * nv,int indent)2221 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
2222 int indent)
2223 {
2224 nvlist_t **child;
2225 uint_t c, children;
2226 char *vname;
2227 uint64_t is_log = 0;
2228
2229 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
2230 &is_log);
2231
2232 if (name != NULL)
2233 (void) printf("\t%*s%s%s\n", indent, "", name,
2234 is_log ? " [log]" : "");
2235
2236 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2237 &child, &children) != 0)
2238 return;
2239
2240 for (c = 0; c < children; c++) {
2241 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
2242 print_vdev_tree(hdl, vname, child[c], indent + 2);
2243 free(vname);
2244 }
2245 }
2246
2247 void
zpool_collect_unsup_feat(nvlist_t * config,char * buf,size_t size)2248 zpool_collect_unsup_feat(nvlist_t *config, char *buf, size_t size)
2249 {
2250 nvlist_t *nvinfo, *unsup_feat;
2251 char temp[512];
2252
2253 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
2254 unsup_feat = fnvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT);
2255
2256 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL);
2257 nvp != NULL; nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
2258 const char *desc = fnvpair_value_string(nvp);
2259 if (strlen(desc) > 0) {
2260 (void) snprintf(temp, 512, "\t%s (%s)\n",
2261 nvpair_name(nvp), desc);
2262 (void) strlcat(buf, temp, size);
2263 } else {
2264 (void) snprintf(temp, 512, "\t%s\n", nvpair_name(nvp));
2265 (void) strlcat(buf, temp, size);
2266 }
2267 }
2268 }
2269
2270 /*
2271 * Import the given pool using the known configuration and a list of
2272 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
2275 */
2276 int
zpool_import_props(libzfs_handle_t * hdl,nvlist_t * config,const char * newname,nvlist_t * props,int flags)2277 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2278 nvlist_t *props, int flags)
2279 {
2280 zfs_cmd_t zc = {"\0"};
2281 zpool_load_policy_t policy;
2282 nvlist_t *nv = NULL;
2283 nvlist_t *nvinfo = NULL;
2284 nvlist_t *missing = NULL;
2285 const char *thename;
2286 const char *origname;
2287 int ret;
2288 int error = 0;
2289 char buf[2048];
2290 char errbuf[ERRBUFLEN];
2291
2292 origname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
2293
2294 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2295 "cannot import pool '%s'"), origname);
2296
2297 if (newname != NULL) {
2298 if (!zpool_name_valid(hdl, B_FALSE, newname))
2299 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
2300 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2301 newname));
2302 thename = newname;
2303 } else {
2304 thename = origname;
2305 }
2306
2307 if (props != NULL) {
2308 uint64_t version;
2309 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2310
2311 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
2312
2313 if ((props = zpool_valid_proplist(hdl, origname,
2314 props, version, flags, errbuf)) == NULL)
2315 return (-1);
2316 zcmd_write_src_nvlist(hdl, &zc, props);
2317 nvlist_free(props);
2318 }
2319
2320 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
2321
2322 zc.zc_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
2323
2324 zcmd_write_conf_nvlist(hdl, &zc, config);
2325 zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2);
2326
2327 zc.zc_cookie = flags;
2328 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
2329 errno == ENOMEM)
2330 zcmd_expand_dst_nvlist(hdl, &zc);
2331 if (ret != 0)
2332 error = errno;
2333
2334 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
2335
2336 zcmd_free_nvlists(&zc);
2337
2338 zpool_get_load_policy(config, &policy);
2339
2340 if (getenv("ZFS_LOAD_INFO_DEBUG") && nv != NULL &&
2341 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2342 dump_nvlist(nvinfo, 4);
2343 }
2344
2345 if (error) {
2346 char desc[1024];
2347 char aux[256];
2348
2349 /*
2350 * Dry-run failed, but we print out what success
2351 * looks like if we found a best txg
2352 */
2353 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
2354 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2355 B_TRUE, nv);
2356 nvlist_free(nv);
2357 return (-1);
2358 }
2359
2360 if (newname == NULL)
2361 (void) snprintf(desc, sizeof (desc),
2362 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2363 thename);
2364 else
2365 (void) snprintf(desc, sizeof (desc),
2366 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
2367 origname, thename);
2368
2369 switch (error) {
2370 case ENOTSUP:
2371 if (nv != NULL && nvlist_lookup_nvlist(nv,
2372 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2373 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
2374 (void) printf(dgettext(TEXT_DOMAIN, "This "
2375 "pool uses the following feature(s) not "
2376 "supported by this system:\n"));
2377 memset(buf, 0, 2048);
2378 zpool_collect_unsup_feat(nv, buf, 2048);
2379 (void) printf("%s", buf);
2380 if (nvlist_exists(nvinfo,
2381 ZPOOL_CONFIG_CAN_RDONLY)) {
2382 (void) printf(dgettext(TEXT_DOMAIN,
2383 "All unsupported features are only "
2384 "required for writing to the pool."
2385 "\nThe pool can be imported using "
2386 "'-o readonly=on'.\n"));
2387 }
2388 }
2389 /*
2390 * Unsupported version.
2391 */
2392 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
2393 break;
2394
2395 case EREMOTEIO:
2396 if (nv != NULL && nvlist_lookup_nvlist(nv,
2397 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2398 const char *hostname = "<unknown>";
2399 uint64_t hostid = 0;
2400 mmp_state_t mmp_state;
2401
2402 mmp_state = fnvlist_lookup_uint64(nvinfo,
2403 ZPOOL_CONFIG_MMP_STATE);
2404
2405 if (nvlist_exists(nvinfo,
2406 ZPOOL_CONFIG_MMP_HOSTNAME))
2407 hostname = fnvlist_lookup_string(nvinfo,
2408 ZPOOL_CONFIG_MMP_HOSTNAME);
2409
2410 if (nvlist_exists(nvinfo,
2411 ZPOOL_CONFIG_MMP_HOSTID))
2412 hostid = fnvlist_lookup_uint64(nvinfo,
2413 ZPOOL_CONFIG_MMP_HOSTID);
2414
2415 if (mmp_state == MMP_STATE_ACTIVE) {
2416 (void) snprintf(aux, sizeof (aux),
2417 dgettext(TEXT_DOMAIN, "pool is imp"
2418 "orted on host '%s' (hostid=%lx).\n"
2419 "Export the pool on the other "
2420 "system, then run 'zpool import'."),
2421 hostname, (unsigned long) hostid);
2422 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
2423 (void) snprintf(aux, sizeof (aux),
2424 dgettext(TEXT_DOMAIN, "pool has "
2425 "the multihost property on and "
2426 "the\nsystem's hostid is not set. "
2427 "Set a unique system hostid with "
2428 "the zgenhostid(8) command.\n"));
2429 }
2430
2431 (void) zfs_error_aux(hdl, "%s", aux);
2432 }
2433 (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2434 break;
2435
2436 case EINVAL:
2437 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2438 break;
2439
2440 case EROFS:
2441 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2442 "one or more devices is read only"));
2443 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2444 break;
2445
2446 case ENXIO:
2447 if (nv && nvlist_lookup_nvlist(nv,
2448 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2449 nvlist_lookup_nvlist(nvinfo,
2450 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2451 (void) printf(dgettext(TEXT_DOMAIN,
2452 "The devices below are missing or "
2453 "corrupted, use '-m' to import the pool "
2454 "anyway:\n"));
2455 print_vdev_tree(hdl, NULL, missing, 2);
2456 (void) printf("\n");
2457 }
2458 (void) zpool_standard_error(hdl, error, desc);
2459 break;
2460
2461 case EEXIST:
2462 (void) zpool_standard_error(hdl, error, desc);
2463 break;
2464
2465 case EBUSY:
2466 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2467 "one or more devices are already in use\n"));
2468 (void) zfs_error(hdl, EZFS_BADDEV, desc);
2469 break;
2470 case ENAMETOOLONG:
2471 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2472 "new name of at least one dataset is longer than "
2473 "the maximum allowable length"));
2474 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2475 break;
2476 default:
2477 (void) zpool_standard_error(hdl, error, desc);
2478 memset(buf, 0, 2048);
2479 zpool_explain_recover(hdl,
2480 newname ? origname : thename, -error, nv,
2481 buf, 2048);
2482 (void) printf("\t%s", buf);
2483 break;
2484 }
2485
2486 nvlist_free(nv);
2487 ret = -1;
2488 } else {
2489 zpool_handle_t *zhp;
2490
2491 /*
2492 * This should never fail, but play it safe anyway.
2493 */
2494 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2495 ret = -1;
2496 else if (zhp != NULL)
2497 zpool_close(zhp);
2498 if (policy.zlp_rewind &
2499 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2500 zpool_rewind_exclaim(hdl, newname ? origname : thename,
2501 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2502 }
2503 nvlist_free(nv);
2504 }
2505
2506 return (ret);
2507 }
2508
2509 /*
2510 * Translate vdev names to guids. If a vdev_path is determined to be
2511 * unsuitable then a vd_errlist is allocated and the vdev path and errno
2512 * are added to it.
2513 */
2514 static int
zpool_translate_vdev_guids(zpool_handle_t * zhp,nvlist_t * vds,nvlist_t * vdev_guids,nvlist_t * guids_to_paths,nvlist_t ** vd_errlist)2515 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2516 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2517 {
2518 nvlist_t *errlist = NULL;
2519 int error = 0;
2520
2521 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2522 elem = nvlist_next_nvpair(vds, elem)) {
2523 boolean_t spare, cache;
2524
2525 const char *vd_path = nvpair_name(elem);
2526 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2527 NULL);
2528
2529 if ((tgt == NULL) || cache || spare) {
2530 if (errlist == NULL) {
2531 errlist = fnvlist_alloc();
2532 error = EINVAL;
2533 }
2534
2535 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2536 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2537 fnvlist_add_int64(errlist, vd_path, err);
2538 continue;
2539 }
2540
2541 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2542 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2543
2544 char msg[MAXNAMELEN];
2545 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2546 fnvlist_add_string(guids_to_paths, msg, vd_path);
2547 }
2548
2549 if (error != 0) {
2550 verify(errlist != NULL);
2551 if (vd_errlist != NULL)
2552 *vd_errlist = errlist;
2553 else
2554 fnvlist_free(errlist);
2555 }
2556
2557 return (error);
2558 }
2559
2560 static int
xlate_init_err(int err)2561 xlate_init_err(int err)
2562 {
2563 switch (err) {
2564 case ENODEV:
2565 return (EZFS_NODEVICE);
2566 case EINVAL:
2567 case EROFS:
2568 return (EZFS_BADDEV);
2569 case EBUSY:
2570 return (EZFS_INITIALIZING);
2571 case ESRCH:
2572 return (EZFS_NO_INITIALIZE);
2573 }
2574 return (err);
2575 }
2576
2577 int
zpool_initialize_one(zpool_handle_t * zhp,void * data)2578 zpool_initialize_one(zpool_handle_t *zhp, void *data)
2579 {
2580 int error;
2581 libzfs_handle_t *hdl = zpool_get_handle(zhp);
2582 const char *pool_name = zpool_get_name(zhp);
2583 if (zpool_open_silent(hdl, pool_name, &zhp) != 0)
2584 return (-1);
2585 initialize_cbdata_t *cb = data;
2586 nvlist_t *vdevs = fnvlist_alloc();
2587
2588 nvlist_t *config = zpool_get_config(zhp, NULL);
2589 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
2590 ZPOOL_CONFIG_VDEV_TREE);
2591 zpool_collect_leaves(zhp, nvroot, vdevs);
2592 if (cb->wait)
2593 error = zpool_initialize_wait(zhp, cb->cmd_type, vdevs);
2594 else
2595 error = zpool_initialize(zhp, cb->cmd_type, vdevs);
2596 fnvlist_free(vdevs);
2597
2598 return (error);
2599 }
2600
2601 /*
2602 * Begin, suspend, cancel, or uninit (clear) the initialization (initializing
2603 * of all free blocks) for the given vdevs in the given pool.
2604 */
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds, boolean_t wait)
{
	int err;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;
	nvlist_t *errlist;
	nvpair_t *elem;

	/*
	 * Translate the requested vdev names to guids; unusable vdevs
	 * (spares, l2cache, unknown names) are collected in vd_errlist.
	 */
	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);

	if (err != 0) {
		verify(vd_errlist != NULL);
		goto list_errors;
	}

	err = lzc_initialize(zhp->zpool_name, cmd_type,
	    vdev_guids, &errlist);

	if (err != 0) {
		/* Prefer the kernel's per-vdev error list when present. */
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_INITIALIZE_VDEVS, &vd_errlist) == 0) {
			goto list_errors;
		}

		/* Older kernels reject POOL_INITIALIZE_UNINIT with EINVAL. */
		if (err == EINVAL && cmd_type == POOL_INITIALIZE_UNINIT) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "uninitialize is not supported by kernel"));
		}

		(void) zpool_standard_error(zhp->zpool_hdl, err,
		    dgettext(TEXT_DOMAIN, "operation failed"));
		goto out;
	}

	if (wait) {
		/* Block until every requested vdev finishes initializing. */
		for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
		    elem = nvlist_next_nvpair(vdev_guids, elem)) {

			uint64_t guid = fnvpair_value_uint64(elem);

			err = lzc_wait_tag(zhp->zpool_name,
			    ZPOOL_WAIT_INITIALIZE, guid, NULL);
			if (err != 0) {
				(void) zpool_standard_error_fmt(zhp->zpool_hdl,
				    err, dgettext(TEXT_DOMAIN, "error "
				    "waiting for '%s' to initialize"),
				    nvpair_name(elem));

				goto out;
			}
		}
	}
	goto out;

list_errors:
	/* Report one error per failed vdev, preferring its path to its guid. */
	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
		const char *path;

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
		    &path) != 0)
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot initialize '%s'", path);
	}

out:
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL)
		fnvlist_free(vd_errlist);

	return (err == 0 ? 0 : -1);
}
2687
2688 int
zpool_initialize(zpool_handle_t * zhp,pool_initialize_func_t cmd_type,nvlist_t * vds)2689 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2690 nvlist_t *vds)
2691 {
2692 return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2693 }
2694
2695 int
zpool_initialize_wait(zpool_handle_t * zhp,pool_initialize_func_t cmd_type,nvlist_t * vds)2696 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2697 nvlist_t *vds)
2698 {
2699 return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2700 }
2701
2702 static int
xlate_trim_err(int err)2703 xlate_trim_err(int err)
2704 {
2705 switch (err) {
2706 case ENODEV:
2707 return (EZFS_NODEVICE);
2708 case EINVAL:
2709 case EROFS:
2710 return (EZFS_BADDEV);
2711 case EBUSY:
2712 return (EZFS_TRIMMING);
2713 case ESRCH:
2714 return (EZFS_NO_TRIM);
2715 case EOPNOTSUPP:
2716 return (EZFS_TRIM_NOTSUP);
2717 }
2718 return (err);
2719 }
2720
2721 void
zpool_collect_leaves(zpool_handle_t * zhp,nvlist_t * nvroot,nvlist_t * res)2722 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
2723 {
2724 libzfs_handle_t *hdl = zhp->zpool_hdl;
2725 uint_t children = 0;
2726 nvlist_t **child;
2727 uint_t i;
2728
2729 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2730 &child, &children);
2731
2732 if (children == 0) {
2733 char *path = zpool_vdev_name(hdl, zhp, nvroot,
2734 VDEV_NAME_PATH);
2735
2736 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
2737 strcmp(path, VDEV_TYPE_HOLE) != 0)
2738 fnvlist_add_boolean(res, path);
2739
2740 free(path);
2741 return;
2742 }
2743
2744 for (i = 0; i < children; i++) {
2745 zpool_collect_leaves(zhp, child[i], res);
2746 }
2747 }
2748
2749 int
zpool_trim_one(zpool_handle_t * zhp,void * data)2750 zpool_trim_one(zpool_handle_t *zhp, void *data)
2751 {
2752 int error;
2753 libzfs_handle_t *hdl = zpool_get_handle(zhp);
2754 const char *pool_name = zpool_get_name(zhp);
2755 if (zpool_open_silent(hdl, pool_name, &zhp) != 0)
2756 return (-1);
2757
2758 trim_cbdata_t *cb = data;
2759 nvlist_t *vdevs = fnvlist_alloc();
2760
2761 /* no individual leaf vdevs specified, so add them all */
2762 nvlist_t *config = zpool_get_config(zhp, NULL);
2763 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
2764 ZPOOL_CONFIG_VDEV_TREE);
2765
2766 zpool_collect_leaves(zhp, nvroot, vdevs);
2767 error = zpool_trim(zhp, cb->cmd_type, vdevs, &cb->trim_flags);
2768 fnvlist_free(vdevs);
2769
2770 return (error);
2771 }
2772
2773 static int
zpool_trim_wait(zpool_handle_t * zhp,nvlist_t * vdev_guids)2774 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2775 {
2776 int err;
2777 nvpair_t *elem;
2778
2779 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2780 elem = nvlist_next_nvpair(vdev_guids, elem)) {
2781
2782 uint64_t guid = fnvpair_value_uint64(elem);
2783
2784 err = lzc_wait_tag(zhp->zpool_name,
2785 ZPOOL_WAIT_TRIM, guid, NULL);
2786 if (err != 0) {
2787 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2788 err, dgettext(TEXT_DOMAIN, "error "
2789 "waiting to trim '%s'"), nvpair_name(elem));
2790
2791 return (err);
2792 }
2793 }
2794 return (0);
2795 }
2796
2797 /*
2798 * Check errlist and report any errors, omitting ones which should be
2799 * suppressed. Returns B_TRUE if any errors were reported.
2800 */
2801 static boolean_t
check_trim_errs(zpool_handle_t * zhp,trimflags_t * trim_flags,nvlist_t * guids_to_paths,nvlist_t * vds,nvlist_t * errlist)2802 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2803 nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2804 {
2805 nvpair_t *elem;
2806 boolean_t reported_errs = B_FALSE;
2807 int num_vds = 0;
2808 int num_suppressed_errs = 0;
2809
2810 for (elem = nvlist_next_nvpair(vds, NULL);
2811 elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2812 num_vds++;
2813 }
2814
2815 for (elem = nvlist_next_nvpair(errlist, NULL);
2816 elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2817 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2818 const char *path;
2819
2820 /*
2821 * If only the pool was specified, and it was not a secure
2822 * trim then suppress warnings for individual vdevs which
2823 * do not support trimming.
2824 */
2825 if (vd_error == EZFS_TRIM_NOTSUP &&
2826 trim_flags->fullpool &&
2827 !trim_flags->secure) {
2828 num_suppressed_errs++;
2829 continue;
2830 }
2831
2832 reported_errs = B_TRUE;
2833 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2834 &path) != 0)
2835 path = nvpair_name(elem);
2836
2837 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2838 "cannot trim '%s'", path);
2839 }
2840
2841 if (num_suppressed_errs == num_vds) {
2842 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2843 "no devices in pool support trim operations"));
2844 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2845 dgettext(TEXT_DOMAIN, "cannot trim")));
2846 reported_errs = B_TRUE;
2847 }
2848
2849 return (reported_errs);
2850 }
2851
2852 /*
2853 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2854 * the given vdevs in the given pool.
2855 */
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
    trimflags_t *trim_flags)
{
	int err;
	int retval = 0;

	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *errlist = NULL;

	/* Resolve vdev names to guids; rejected vdevs land in errlist. */
	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &errlist);
	if (err != 0) {
		check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
		retval = -1;
		goto out;
	}

	err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
	    trim_flags->secure, vdev_guids, &errlist);
	if (err != 0) {
		nvlist_t *vd_errlist;
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
			/*
			 * Per-vdev errors from the kernel; some (e.g.
			 * TRIM-unsupported devices on a whole-pool trim)
			 * may be suppressed, in which case we proceed.
			 */
			if (check_trim_errs(zhp, trim_flags, guids_to_paths,
			    vds, vd_errlist)) {
				retval = -1;
				goto out;
			}
		} else {
			char errbuf[ERRBUFLEN];

			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "operation failed"));
			zpool_standard_error(zhp->zpool_hdl, err, errbuf);
			retval = -1;
			goto out;
		}
	}


	/* Optionally block until every requested vdev finishes trimming. */
	if (trim_flags->wait)
		retval = zpool_trim_wait(zhp, vdev_guids);

out:
	if (errlist != NULL)
		fnvlist_free(errlist);
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);
	return (retval);
}
2908
2909 /*
2910 * Scan the pool.
2911 */
2912 int
zpool_scan(zpool_handle_t * zhp,pool_scan_func_t func,pool_scrub_cmd_t cmd)2913 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd) {
2914 return (zpool_scan_range(zhp, func, cmd, 0, 0));
2915 }
2916
/*
 * Start, pause, or cancel a scan (scrub, error scrub, or resilver restart)
 * on the pool.  date_start/date_end are forwarded to the kernel when
 * nonzero; (0, 0) requests an unrestricted scan.  Returns 0 on success or
 * a libzfs error via the handle.
 */
int
zpool_scan_range(zpool_handle_t *zhp, pool_scan_func_t func,
    pool_scrub_cmd_t cmd, time_t date_start, time_t date_end)
{
	char errbuf[ERRBUFLEN];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_uint64(args, "scan_type", (uint64_t)func);
	fnvlist_add_uint64(args, "scan_command", (uint64_t)cmd);
	if (date_start != 0 || date_end != 0) {
		fnvlist_add_uint64(args, "scan_date_start",
		    (uint64_t)date_start);
		fnvlist_add_uint64(args, "scan_date_end", (uint64_t)date_end);
	}

	err = lzc_scrub(ZFS_IOC_POOL_SCRUB, zhp->zpool_name, args, NULL);
	fnvlist_free(args);

	if (err == 0) {
		return (0);
	} else if (err == ZFS_ERR_IOC_CMD_UNAVAIL) {
		/* Fall back to the legacy scan ioctl on older kernels. */
		zfs_cmd_t zc = {"\0"};
		(void) strlcpy(zc.zc_name, zhp->zpool_name,
		    sizeof (zc.zc_name));
		zc.zc_cookie = func;
		zc.zc_flags = cmd;

		if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
			return (0);
	}

	/*
	 * An ECANCELED on a scrub means one of the following:
	 * 1. we resumed a paused scrub.
	 * 2. we resumed a paused error scrub.
	 * 3. Error scrub is not run because of no error log.
	 *
	 * Note that we no longer return ECANCELED in case 1 or 2. However, in
	 * order to prevent problems where we have a newer userland than
	 * kernel, we keep this check in place. That prevents erroneous
	 * failures when an older kernel returns ECANCELED in those cases.
	 */
	if (err == ECANCELED && (func == POOL_SCAN_SCRUB ||
	    func == POOL_SCAN_ERRORSCRUB) && cmd == POOL_SCRUB_NORMAL)
		return (0);
	/*
	 * The following cases have been handled here:
	 * 1. Paused a scrub/error scrub if there is none in progress.
	 */
	if (err == ENOENT && func != POOL_SCAN_NONE && cmd ==
	    POOL_SCRUB_PAUSE) {
		return (0);
	}

	ASSERT3U(func, >=, POOL_SCAN_NONE);
	ASSERT3U(func, <, POOL_SCAN_FUNCS);

	/* Build the error prefix that matches the requested operation. */
	if (func == POOL_SCAN_SCRUB || func == POOL_SCAN_ERRORSCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "cannot pause scrubbing %s"),
			    zhp->zpool_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(errbuf, sizeof (errbuf),
			    dgettext(TEXT_DOMAIN, "cannot scrub %s"),
			    zhp->zpool_name);
		}
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zhp->zpool_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
		    "cannot cancel scrubbing %s"), zhp->zpool_name);
	} else {
		assert(!"unexpected result");
	}

	/*
	 * With EBUSY, six cases are possible:
	 *
	 * Current state		Requested
	 * 1. Normal Scrub Running	Normal Scrub or Error Scrub
	 * 2. Normal Scrub Paused	Error Scrub
	 * 3. Normal Scrub Paused 	Pause Normal Scrub
	 * 4. Error Scrub Running	Normal Scrub or Error Scrub
	 * 5. Error Scrub Paused	Pause Error Scrub
	 * 6. Resilvering		Anything else
	 */
	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		/* Inspect the current scan stats to pick the right error. */
		nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
		    ps->pss_state == DSS_SCANNING) {
			if (ps->pss_pass_scrub_pause == 0) {
				/* handles case 1 */
				assert(cmd == POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_SCRUBBING,
				    errbuf));
			} else {
				if (func == POOL_SCAN_ERRORSCRUB) {
					/* handles case 2 */
					ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED_TO_CANCEL,
					    errbuf));
				} else {
					/* handles case 3 */
					ASSERT3U(func, ==, POOL_SCAN_SCRUB);
					ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
					return (zfs_error(hdl,
					    EZFS_SCRUB_PAUSED, errbuf));
				}
			}
		} else if (ps &&
		    ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB &&
		    ps->pss_error_scrub_state == DSS_ERRORSCRUBBING) {
			if (ps->pss_pass_error_scrub_pause == 0) {
				/* handles case 4 */
				ASSERT3U(cmd, ==, POOL_SCRUB_NORMAL);
				return (zfs_error(hdl, EZFS_ERRORSCRUBBING,
				    errbuf));
			} else {
				/* handles case 5 */
				ASSERT3U(func, ==, POOL_SCAN_ERRORSCRUB);
				ASSERT3U(cmd, ==, POOL_SCRUB_PAUSE);
				return (zfs_error(hdl, EZFS_ERRORSCRUB_PAUSED,
				    errbuf));
			}
		} else {
			/* handles case 6 */
			return (zfs_error(hdl, EZFS_RESILVERING, errbuf));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, errbuf));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, errbuf));
	} else {
		return (zpool_standard_error(hdl, err, errbuf));
	}
}
3067
3068 /*
3069 * Find a vdev that matches the search criteria specified. We use the
3070 * the nvpair name to determine how we should look for the device.
3071 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
3072 * spare; but FALSE if its an INUSE spare.
3073 *
3074 * If 'return_parent' is set, then return the *parent* of the vdev you're
3075 * searching for rather than the vdev itself.
3076 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	const char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
	const char *tmp = NULL;
	boolean_t is_root;

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	/*
	 * NOTE(review): if 'nv' had no ZPOOL_CONFIG_TYPE entry, tmp would
	 * stay NULL and the strcmp below would dereference it; presumably
	 * every vdev nvlist carries a type -- confirm with config producers.
	 */
	nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &tmp);
	if (strcmp(tmp, "root") == 0)
		is_root = B_TRUE;
	else
		is_root = B_FALSE;

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		/* Integer searches only support matching by guid. */
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval = fnvpair_value_uint64(pair);
			uint64_t theguid = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_GUID);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		const char *srchval, *val;

		srchval = fnvpair_value_string(pair);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * draid names are presented like: draid2:4d:6c:0s
			 * We match them up to the first ':' so we can still
			 * do the parity check below, but the other params
			 * are ignored.
			 */
			if ((p = strchr(type, ':')) != NULL) {
				if (strncmp(type, VDEV_TYPE_DRAID,
				    strlen(VDEV_TYPE_DRAID)) == 0)
					*p = '\0';
			}

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));

			id = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID);
			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			/*
			 * If we are looking for a raidz and a parity is
			 * specified, make sure it matches.
			 */
			int rzlen = strlen(VDEV_TYPE_RAIDZ);
			assert(rzlen == strlen(VDEV_TYPE_DRAID));
			int typlen = strlen(type);
			if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
			    strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
			    typlen != rzlen) {
				uint64_t vdev_parity;
				int parity = *(type + rzlen) - '0';

				if (parity <= 0 || parity > 3 ||
				    (typlen - rzlen) != 1) {
					/*
					 * Nonsense parity specified, can
					 * never match
					 */
					free(type);
					return (NULL);
				}
				vdev_parity = fnvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY);
				if ((int)vdev_parity != parity) {
					free(type);
					break;
				}
			}

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	/* No match at this node: recurse into the primary children. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL, return_parent)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret && return_parent && !is_root ? nv : ret);
		}
	}

	/* Then the spare list, flagging any hit as a spare. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL, return_parent))
			    != NULL) {
				*avail_spare = B_TRUE;
				return (ret && return_parent &&
				    !is_root ? nv : ret);
			}
		}
	}

	/* Finally the l2cache list, flagging any hit as l2cache. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL, return_parent))
			    != NULL) {
				*l2cache = B_TRUE;
				return (ret && return_parent &&
				    !is_root ? nv : ret);
			}
		}
	}

	return (NULL);
}
3290
3291 /*
3292 * Given a physical path or guid, find the associated vdev.
3293 */
3294 nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t * zhp,const char * ppath,boolean_t * avail_spare,boolean_t * l2cache,boolean_t * log)3295 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
3296 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3297 {
3298 nvlist_t *search, *nvroot, *ret;
3299 uint64_t guid;
3300 char *end;
3301
3302 search = fnvlist_alloc();
3303
3304 guid = strtoull(ppath, &end, 0);
3305 if (guid != 0 && *end == '\0') {
3306 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3307 } else {
3308 fnvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath);
3309 }
3310
3311 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3312 ZPOOL_CONFIG_VDEV_TREE);
3313
3314 *avail_spare = B_FALSE;
3315 *l2cache = B_FALSE;
3316 if (log != NULL)
3317 *log = B_FALSE;
3318 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3319 B_FALSE);
3320 fnvlist_free(search);
3321
3322 return (ret);
3323 }
3324
3325 /*
3326 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
3327 */
3328 static boolean_t
zpool_vdev_is_interior(const char * name)3329 zpool_vdev_is_interior(const char *name)
3330 {
3331 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
3332 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
3333 strncmp(name,
3334 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
3335 strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
3336 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
3337 return (B_TRUE);
3338
3339 if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
3340 !zpool_is_draid_spare(name))
3341 return (B_TRUE);
3342
3343 return (B_FALSE);
3344 }
3345
3346 /*
3347 * Lookup the nvlist for a given vdev or vdev's parent (depending on
3348 * if 'return_parent' is set).
3349 */
3350 static nvlist_t *
__zpool_find_vdev(zpool_handle_t * zhp,const char * path,boolean_t * avail_spare,boolean_t * l2cache,boolean_t * log,boolean_t return_parent)3351 __zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3352 boolean_t *l2cache, boolean_t *log, boolean_t return_parent)
3353 {
3354 char *end;
3355 nvlist_t *nvroot, *search, *ret;
3356 uint64_t guid;
3357 boolean_t __avail_spare, __l2cache, __log;
3358
3359 search = fnvlist_alloc();
3360
3361 guid = strtoull(path, &end, 0);
3362 if (guid != 0 && *end == '\0') {
3363 fnvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid);
3364 } else if (zpool_vdev_is_interior(path)) {
3365 fnvlist_add_string(search, ZPOOL_CONFIG_TYPE, path);
3366 } else {
3367 fnvlist_add_string(search, ZPOOL_CONFIG_PATH, path);
3368 }
3369
3370 nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
3371 ZPOOL_CONFIG_VDEV_TREE);
3372
3373 /*
3374 * User can pass NULL for avail_spare, l2cache, and log, but
3375 * we still need to provide variables to vdev_to_nvlist_iter(), so
3376 * just point them to junk variables here.
3377 */
3378 if (!avail_spare)
3379 avail_spare = &__avail_spare;
3380 if (!l2cache)
3381 l2cache = &__l2cache;
3382 if (!log)
3383 log = &__log;
3384
3385 *avail_spare = B_FALSE;
3386 *l2cache = B_FALSE;
3387 if (log != NULL)
3388 *log = B_FALSE;
3389 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log,
3390 return_parent);
3391 fnvlist_free(search);
3392
3393 return (ret);
3394 }
3395
3396 nvlist_t *
zpool_find_vdev(zpool_handle_t * zhp,const char * path,boolean_t * avail_spare,boolean_t * l2cache,boolean_t * log)3397 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
3398 boolean_t *l2cache, boolean_t *log)
3399 {
3400 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3401 B_FALSE));
3402 }
3403
3404 /* Given a vdev path, return its parent's nvlist */
3405 nvlist_t *
zpool_find_parent_vdev(zpool_handle_t * zhp,const char * path,boolean_t * avail_spare,boolean_t * l2cache,boolean_t * log)3406 zpool_find_parent_vdev(zpool_handle_t *zhp, const char *path,
3407 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
3408 {
3409 return (__zpool_find_vdev(zhp, path, avail_spare, l2cache, log,
3410 B_TRUE));
3411 }
3412
3413 /*
3414 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
3415 *
3416 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
3417 * if the VDEV is a spare, l2cache, or log device. If they're NULL then
3418 * ignore them.
3419 */
3420 static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t * zhp,const char * path,boolean_t * is_spare,boolean_t * is_l2cache,boolean_t * is_log)3421 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
3422 boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
3423 {
3424 boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
3425 nvlist_t *tgt;
3426
3427 if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
3428 &log)) == NULL)
3429 return (0);
3430
3431 if (is_spare != NULL)
3432 *is_spare = spare;
3433 if (is_l2cache != NULL)
3434 *is_l2cache = l2cache;
3435 if (is_log != NULL)
3436 *is_log = log;
3437
3438 return (fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID));
3439 }
3440
3441 /* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
3442 uint64_t
zpool_vdev_path_to_guid(zpool_handle_t * zhp,const char * path)3443 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
3444 {
3445 return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
3446 }
3447
/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 *
 * Returns 0 on success (with *newstate set to the resulting vdev state),
 * or a libzfs error code on failure.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	/* Choose the error-message prefix based on the operation requested. */
	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	/* Spares may only be onlined when the caller explicitly allows it. */
	if (!(flags & ZFS_ONLINE_SPARE) && avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

#ifndef __FreeBSD__
	/*
	 * When expanding (explicitly, or implicitly via the pool's
	 * autoexpand property), a whole-disk vdev may need to be relabeled
	 * so the new capacity becomes visible.
	 */
	const char *pathname;
	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];
			int error;

			/* Resolve a bare device name to an absolute path. */
			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    errbuf));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, errbuf);
			if (error != 0)
				return (error);
		}
	}
#endif

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			/*
			 * EINVAL here indicates the vdev now belongs to a
			 * pool created by 'zpool split'; onlining it in this
			 * pool is no longer meaningful.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, errbuf));
		}
		return (zpool_standard_error(hdl, errno, errbuf));
	}

	/* Report the state the kernel left the vdev in. */
	*newstate = zc.zc_cookie;
	return (0);
}
3537
3538 /*
3539 * Take the specified vdev offline
3540 */
3541 int
zpool_vdev_offline(zpool_handle_t * zhp,const char * path,boolean_t istmp)3542 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3543 {
3544 zfs_cmd_t zc = {"\0"};
3545 char errbuf[ERRBUFLEN];
3546 nvlist_t *tgt;
3547 boolean_t avail_spare, l2cache;
3548 libzfs_handle_t *hdl = zhp->zpool_hdl;
3549
3550 (void) snprintf(errbuf, sizeof (errbuf),
3551 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3552
3553 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3554 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3555 NULL)) == NULL)
3556 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3557
3558 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3559
3560 if (avail_spare)
3561 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3562
3563 zc.zc_cookie = VDEV_STATE_OFFLINE;
3564 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3565
3566 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3567 return (0);
3568
3569 switch (errno) {
3570 case EBUSY:
3571
3572 /*
3573 * There are no other replicas of this device.
3574 */
3575 return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
3576
3577 case EEXIST:
3578 /*
3579 * The log device has unplayed logs
3580 */
3581 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
3582
3583 default:
3584 return (zpool_standard_error(hdl, errno, errbuf));
3585 }
3586 }
3587
3588 /*
3589 * Remove the specified vdev asynchronously from the configuration, so
3590 * that it may come ONLINE if reinserted. This is called from zed on
3591 * Udev remove event.
3592 * Note: We also have a similar function zpool_vdev_remove() that
3593 * removes the vdev from the pool.
3594 */
3595 int
zpool_vdev_remove_wanted(zpool_handle_t * zhp,const char * path)3596 zpool_vdev_remove_wanted(zpool_handle_t *zhp, const char *path)
3597 {
3598 zfs_cmd_t zc = {"\0"};
3599 char errbuf[ERRBUFLEN];
3600 nvlist_t *tgt;
3601 boolean_t avail_spare, l2cache;
3602 libzfs_handle_t *hdl = zhp->zpool_hdl;
3603
3604 (void) snprintf(errbuf, sizeof (errbuf),
3605 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3606
3607 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3608 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3609 NULL)) == NULL)
3610 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3611
3612 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3613
3614 zc.zc_cookie = VDEV_STATE_REMOVED;
3615
3616 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3617 return (0);
3618
3619 return (zpool_standard_error(hdl, errno, errbuf));
3620 }
3621
/*
 * Mark the given vdev faulted.
 *
 * 'guid' identifies the vdev; 'aux' gives the reason for the fault
 * (e.g. VDEV_AUX_ERR_EXCEEDED).  Returns 0 on success or a libzfs
 * error code on failure.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *vdev_nv;
	boolean_t avail_spare, l2cache;
	char *vdev_name;
	char guid_str[21]; /* 64-bit num + '\0' */
	boolean_t is_draid_spare = B_FALSE;
	const char *vdev_type;

	/* Initial error message uses the GUID; upgraded to a name below. */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	/* Look up the vdev by its GUID rendered as a decimal string. */
	snprintf(guid_str, sizeof (guid_str), "%llu", (u_longlong_t)guid);
	if ((vdev_nv = zpool_find_vdev(zhp, guid_str, &avail_spare,
	    &l2cache, NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	vdev_name = zpool_vdev_name(hdl, zhp, vdev_nv, 0);
	if (vdev_name != NULL) {
		/*
		 * We have the actual vdev name, so use that instead of the GUID
		 * in any error messages.
		 */
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot fault %s"), vdev_name);
		free(vdev_name);
	}

	/*
	 * Spares (traditional or draid) cannot be faulted by libzfs, except:
	 *
	 * - Any spare type that exceeds its errors can be faulted (aux =
	 * VDEV_AUX_ERR_EXCEEDED). This is only used by zed.
	 *
	 * - Traditional spares that are active can be force faulted.
	 */
	if (nvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_TYPE, &vdev_type) == 0)
		if (strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0)
			is_draid_spare = B_TRUE;

	/*
	 * If vdev is a spare that is not being used, or is a dRAID spare (in
	 * use or not), then don't allow it to be force-faulted. However, an
	 * in-use dRAID spare can be faulted by ZED if see too many errors
	 * (aux = VDEV_AUX_ERR_EXCEEDED).
	 */
	if (avail_spare || (is_draid_spare && aux != VDEV_AUX_ERR_EXCEEDED))
		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));

	default:
		return (zpool_standard_error(hdl, errno, errbuf));
	}

}
3699
3700 /*
3701 * Generic set vdev state function
3702 */
3703 static int
zpool_vdev_set_state(zpool_handle_t * zhp,uint64_t guid,vdev_aux_t aux,vdev_state_t state)3704 zpool_vdev_set_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux,
3705 vdev_state_t state)
3706 {
3707 zfs_cmd_t zc = {"\0"};
3708 char errbuf[ERRBUFLEN];
3709 libzfs_handle_t *hdl = zhp->zpool_hdl;
3710
3711 (void) snprintf(errbuf, sizeof (errbuf),
3712 dgettext(TEXT_DOMAIN, "cannot set %s %llu"),
3713 zpool_state_to_name(state, aux), (u_longlong_t)guid);
3714
3715 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3716 zc.zc_guid = guid;
3717 zc.zc_cookie = state;
3718 zc.zc_obj = aux;
3719
3720 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3721 return (0);
3722
3723 return (zpool_standard_error(hdl, errno, errbuf));
3724 }
3725
3726 /*
3727 * Mark the given vdev degraded.
3728 */
3729 int
zpool_vdev_degrade(zpool_handle_t * zhp,uint64_t guid,vdev_aux_t aux)3730 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3731 {
3732 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_DEGRADED));
3733 }
3734
3735 /*
3736 * Mark the given vdev as in a removed state (as if the device does not exist).
3737 *
3738 * This is different than zpool_vdev_remove() which does a removal of a device
3739 * from the pool (but the device does exist).
3740 */
3741 int
zpool_vdev_set_removed_state(zpool_handle_t * zhp,uint64_t guid,vdev_aux_t aux)3742 zpool_vdev_set_removed_state(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3743 {
3744 return (zpool_vdev_set_state(zhp, guid, aux, VDEV_STATE_REMOVED));
3745 }
3746
3747 /*
3748 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3749 * a hot spare.
3750 */
3751 static boolean_t
is_replacing_spare(nvlist_t * search,nvlist_t * tgt,int which)3752 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3753 {
3754 nvlist_t **child;
3755 uint_t c, children;
3756
3757 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3758 &children) == 0) {
3759 const char *type = fnvlist_lookup_string(search,
3760 ZPOOL_CONFIG_TYPE);
3761 if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3762 strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3763 children == 2 && child[which] == tgt)
3764 return (B_TRUE);
3765
3766 for (c = 0; c < children; c++)
3767 if (is_replacing_spare(child[c], tgt, which))
3768 return (B_TRUE);
3769 }
3770
3771 return (B_FALSE);
3772 }
3773
3774 /*
3775 * Attach new_disk (fully described by nvroot) to old_disk.
3776 * If 'replacing' is specified, the new disk will replace the old one.
3777 */
3778 int
zpool_vdev_attach(zpool_handle_t * zhp,const char * old_disk,const char * new_disk,nvlist_t * nvroot,int replacing,boolean_t rebuild)3779 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3780 const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3781 {
3782 zfs_cmd_t zc = {"\0"};
3783 char errbuf[ERRBUFLEN];
3784 int ret;
3785 nvlist_t *tgt;
3786 boolean_t avail_spare, l2cache, islog;
3787 uint64_t val;
3788 char *newname;
3789 const char *type;
3790 nvlist_t **child;
3791 uint_t children;
3792 nvlist_t *config_root;
3793 libzfs_handle_t *hdl = zhp->zpool_hdl;
3794
3795 if (replacing)
3796 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3797 "cannot replace %s with %s"), old_disk, new_disk);
3798 else
3799 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3800 "cannot attach %s to %s"), new_disk, old_disk);
3801
3802 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3803 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3804 &islog)) == NULL)
3805 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
3806
3807 if (avail_spare)
3808 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
3809
3810 if (l2cache)
3811 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
3812
3813 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3814 zc.zc_cookie = replacing;
3815 zc.zc_simple = rebuild;
3816
3817 if (rebuild &&
3818 zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3819 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3820 "the loaded zfs module doesn't support device rebuilds"));
3821 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3822 }
3823
3824 type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);
3825 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&
3826 zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {
3827 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3828 "the loaded zfs module doesn't support raidz expansion"));
3829 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3830 }
3831
3832 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3833 &child, &children) != 0 || children != 1) {
3834 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3835 "new device must be a single disk"));
3836 return (zfs_error(hdl, EZFS_INVALCONFIG, errbuf));
3837 }
3838
3839 config_root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3840 ZPOOL_CONFIG_VDEV_TREE);
3841
3842 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3843 return (-1);
3844
3845 /*
3846 * If the target is a hot spare that has been swapped in, we can only
3847 * replace it with another hot spare.
3848 */
3849 if (replacing &&
3850 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3851 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3852 NULL) == NULL || !avail_spare) &&
3853 is_replacing_spare(config_root, tgt, 1)) {
3854 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3855 "can only be replaced by another hot spare"));
3856 free(newname);
3857 return (zfs_error(hdl, EZFS_BADTARGET, errbuf));
3858 }
3859
3860 free(newname);
3861
3862 zcmd_write_conf_nvlist(hdl, &zc, nvroot);
3863
3864 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3865
3866 zcmd_free_nvlists(&zc);
3867
3868 if (ret == 0)
3869 return (0);
3870
3871 switch (errno) {
3872 case ENOTSUP:
3873 /*
3874 * Can't attach to or replace this type of vdev.
3875 */
3876 if (replacing) {
3877 uint64_t version = zpool_get_prop_int(zhp,
3878 ZPOOL_PROP_VERSION, NULL);
3879
3880 if (islog) {
3881 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3882 "cannot replace a log with a spare"));
3883 } else if (rebuild) {
3884 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3885 "only mirror and dRAID vdevs support "
3886 "sequential reconstruction"));
3887 } else if (zpool_is_draid_spare(new_disk)) {
3888 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3889 "dRAID spares can only replace child "
3890 "devices in their parent's dRAID vdev"));
3891 } else if (version >= SPA_VERSION_MULTI_REPLACE) {
3892 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3893 "already in replacing/spare config; wait "
3894 "for completion or use 'zpool detach'"));
3895 } else {
3896 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3897 "cannot replace a replacing device"));
3898 }
3899 } else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3900 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3901 "raidz_expansion feature must be enabled "
3902 "in order to attach a device to raidz"));
3903 } else {
3904 char status[64] = {0};
3905 zpool_prop_get_feature(zhp,
3906 "feature@device_rebuild", status, 63);
3907 if (rebuild &&
3908 strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
3909 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3910 "device_rebuild feature must be enabled "
3911 "in order to use sequential "
3912 "reconstruction"));
3913 } else {
3914 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3915 "can only attach to mirrors and top-level "
3916 "disks"));
3917 }
3918 }
3919 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3920 break;
3921
3922 case EINVAL:
3923 /*
3924 * The new device must be a single disk.
3925 */
3926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3927 "new device must be a single disk"));
3928 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3929 break;
3930
3931 case EBUSY:
3932 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
3933 new_disk);
3934 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3935 break;
3936
3937 case EOVERFLOW:
3938 /*
3939 * The new device is too small.
3940 */
3941 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3942 "device is too small"));
3943 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3944 break;
3945
3946 case EDOM:
3947 /*
3948 * The new device has a different optimal sector size.
3949 */
3950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3951 "new device has a different optimal sector size; use the "
3952 "option '-o ashift=N' to override the optimal size"));
3953 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3954 break;
3955
3956 case ENAMETOOLONG:
3957 /*
3958 * The resulting top-level vdev spec won't fit in the label.
3959 */
3960 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3961 break;
3962
3963 case ENXIO:
3964 /*
3965 * The existing raidz vdev has offline children
3966 */
3967 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3968 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3969 "raidz vdev has devices that are are offline or "
3970 "being replaced"));
3971 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3972 break;
3973 } else {
3974 (void) zpool_standard_error(hdl, errno, errbuf);
3975 }
3976 break;
3977
3978 case EADDRINUSE:
3979 /*
3980 * The boot reserved area is already being used (FreeBSD)
3981 */
3982 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
3983 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3984 "the reserved boot area needed for the expansion "
3985 "is already being used by a boot loader"));
3986 (void) zfs_error(hdl, EZFS_BADDEV, errbuf);
3987 } else {
3988 (void) zpool_standard_error(hdl, errno, errbuf);
3989 }
3990 break;
3991
3992 case ZFS_ERR_ASHIFT_MISMATCH:
3993 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3994 "The new device cannot have a higher alignment requirement "
3995 "than the top-level vdev."));
3996 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
3997 break;
3998 default:
3999 (void) zpool_standard_error(hdl, errno, errbuf);
4000 }
4001
4002 return (-1);
4003 }
4004
4005 /*
4006 * Detach the specified device.
4007 */
4008 int
zpool_vdev_detach(zpool_handle_t * zhp,const char * path)4009 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
4010 {
4011 zfs_cmd_t zc = {"\0"};
4012 char errbuf[ERRBUFLEN];
4013 nvlist_t *tgt;
4014 boolean_t avail_spare, l2cache;
4015 libzfs_handle_t *hdl = zhp->zpool_hdl;
4016
4017 (void) snprintf(errbuf, sizeof (errbuf),
4018 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
4019
4020 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4021 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
4022 NULL)) == NULL)
4023 return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
4024
4025 if (avail_spare)
4026 return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
4027
4028 if (l2cache)
4029 return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
4030
4031 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
4032
4033 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
4034 return (0);
4035
4036 switch (errno) {
4037
4038 case ENOTSUP:
4039 /*
4040 * Can't detach from this type of vdev.
4041 */
4042 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
4043 "applicable to mirror and replacing vdevs"));
4044 (void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
4045 break;
4046
4047 case EBUSY:
4048 /*
4049 * There are no other replicas of this device.
4050 */
4051 (void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
4052 break;
4053
4054 default:
4055 (void) zpool_standard_error(hdl, errno, errbuf);
4056 }
4057
4058 return (-1);
4059 }
4060
4061 /*
4062 * Find a mirror vdev in the source nvlist.
4063 *
4064 * The mchild array contains a list of disks in one of the top-level mirrors
4065 * of the source pool. The schild array contains a list of disks that the
4066 * user specified on the command line. We loop over the mchild array to
4067 * see if any entry in the schild array matches.
4068 *
4069 * If a disk in the mchild array is found in the schild array, we return
4070 * the index of that entry. Otherwise we return -1.
4071 */
4072 static int
find_vdev_entry(zpool_handle_t * zhp,nvlist_t ** mchild,uint_t mchildren,nvlist_t ** schild,uint_t schildren)4073 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
4074 nvlist_t **schild, uint_t schildren)
4075 {
4076 uint_t mc;
4077
4078 for (mc = 0; mc < mchildren; mc++) {
4079 uint_t sc;
4080 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
4081 mchild[mc], 0);
4082
4083 for (sc = 0; sc < schildren; sc++) {
4084 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
4085 schild[sc], 0);
4086 boolean_t result = (strcmp(mpath, spath) == 0);
4087
4088 free(spath);
4089 if (result) {
4090 free(mpath);
4091 return (mc);
4092 }
4093 }
4094
4095 free(mpath);
4096 }
4097
4098 return (-1);
4099 }
4100
4101 /*
4102 * Split a mirror pool. If newroot points to null, then a new nvlist
4103 * is generated and it is the responsibility of the caller to free it.
4104 */
4105 int
zpool_vdev_split(zpool_handle_t * zhp,char * newname,nvlist_t ** newroot,nvlist_t * props,splitflags_t flags)4106 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
4107 nvlist_t *props, splitflags_t flags)
4108 {
4109 zfs_cmd_t zc = {"\0"};
4110 char errbuf[ERRBUFLEN];
4111 const char *bias;
4112 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
4113 nvlist_t **varray = NULL, *zc_props = NULL;
4114 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
4115 libzfs_handle_t *hdl = zhp->zpool_hdl;
4116 uint64_t vers, readonly = B_FALSE;
4117 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
4118 int retval = 0;
4119
4120 (void) snprintf(errbuf, sizeof (errbuf),
4121 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
4122
4123 if (!zpool_name_valid(hdl, B_FALSE, newname))
4124 return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
4125
4126 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
4127 (void) fprintf(stderr, gettext("Internal error: unable to "
4128 "retrieve pool configuration\n"));
4129 return (-1);
4130 }
4131
4132 tree = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
4133 vers = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
4134
4135 if (props) {
4136 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
4137 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
4138 props, vers, flags, errbuf)) == NULL)
4139 return (-1);
4140 (void) nvlist_lookup_uint64(zc_props,
4141 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
4142 if (readonly) {
4143 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4144 "property %s can only be set at import time"),
4145 zpool_prop_to_name(ZPOOL_PROP_READONLY));
4146 return (-1);
4147 }
4148 }
4149
4150 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
4151 &children) != 0) {
4152 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4153 "Source pool is missing vdev tree"));
4154 nvlist_free(zc_props);
4155 return (-1);
4156 }
4157
4158 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
4159 vcount = 0;
4160
4161 if (*newroot == NULL ||
4162 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
4163 &newchild, &newchildren) != 0)
4164 newchildren = 0;
4165
4166 for (c = 0; c < children; c++) {
4167 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
4168 boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
4169 const char *type;
4170 nvlist_t **mchild, *vdev;
4171 uint_t mchildren;
4172 int entry;
4173
4174 /*
4175 * Unlike cache & spares, slogs are stored in the
4176 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
4177 */
4178 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
4179 &is_log);
4180 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
4181 &is_hole);
4182 if (is_log || is_hole) {
4183 /*
4184 * Create a hole vdev and put it in the config.
4185 */
4186 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
4187 goto out;
4188 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
4189 VDEV_TYPE_HOLE) != 0)
4190 goto out;
4191 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
4192 1) != 0)
4193 goto out;
4194 if (lastlog == 0)
4195 lastlog = vcount;
4196 varray[vcount++] = vdev;
4197 continue;
4198 }
4199 lastlog = 0;
4200 type = fnvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE);
4201
4202 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
4203 vdev = child[c];
4204 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
4205 goto out;
4206 continue;
4207 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
4208 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4209 "Source pool must be composed only of mirrors\n"));
4210 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4211 goto out;
4212 }
4213
4214 if (nvlist_lookup_string(child[c],
4215 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
4216 if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
4217 is_special = B_TRUE;
4218 else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
4219 is_dedup = B_TRUE;
4220 }
4221 verify(nvlist_lookup_nvlist_array(child[c],
4222 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
4223
4224 /* find or add an entry for this top-level vdev */
4225 if (newchildren > 0 &&
4226 (entry = find_vdev_entry(zhp, mchild, mchildren,
4227 newchild, newchildren)) >= 0) {
4228 /* We found a disk that the user specified. */
4229 vdev = mchild[entry];
4230 ++found;
4231 } else {
4232 /* User didn't specify a disk for this vdev. */
4233 vdev = mchild[mchildren - 1];
4234 }
4235
4236 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
4237 goto out;
4238
4239 if (flags.dryrun != 0) {
4240 if (is_dedup == B_TRUE) {
4241 if (nvlist_add_string(varray[vcount - 1],
4242 ZPOOL_CONFIG_ALLOCATION_BIAS,
4243 VDEV_ALLOC_BIAS_DEDUP) != 0)
4244 goto out;
4245 } else if (is_special == B_TRUE) {
4246 if (nvlist_add_string(varray[vcount - 1],
4247 ZPOOL_CONFIG_ALLOCATION_BIAS,
4248 VDEV_ALLOC_BIAS_SPECIAL) != 0)
4249 goto out;
4250 }
4251 }
4252 }
4253
4254 /* did we find every disk the user specified? */
4255 if (found != newchildren) {
4256 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
4257 "include at most one disk from each mirror"));
4258 retval = zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4259 goto out;
4260 }
4261
4262 /* Prepare the nvlist for populating. */
4263 if (*newroot == NULL) {
4264 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
4265 goto out;
4266 freelist = B_TRUE;
4267 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
4268 VDEV_TYPE_ROOT) != 0)
4269 goto out;
4270 } else {
4271 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
4272 }
4273
4274 /* Add all the children we found */
4275 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
4276 (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
4277 goto out;
4278
4279 /*
4280 * If we're just doing a dry run, exit now with success.
4281 */
4282 if (flags.dryrun) {
4283 memory_err = B_FALSE;
4284 freelist = B_FALSE;
4285 goto out;
4286 }
4287
4288 /* now build up the config list & call the ioctl */
4289 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
4290 goto out;
4291
4292 if (nvlist_add_nvlist(newconfig,
4293 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
4294 nvlist_add_string(newconfig,
4295 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
4296 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
4297 goto out;
4298
4299 /*
4300 * The new pool is automatically part of the namespace unless we
4301 * explicitly export it.
4302 */
4303 if (!flags.import)
4304 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
4305 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4306 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
4307 zcmd_write_conf_nvlist(hdl, &zc, newconfig);
4308 if (zc_props != NULL)
4309 zcmd_write_src_nvlist(hdl, &zc, zc_props);
4310
4311 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
4312 retval = zpool_standard_error(hdl, errno, errbuf);
4313 goto out;
4314 }
4315
4316 freelist = B_FALSE;
4317 memory_err = B_FALSE;
4318
4319 out:
4320 if (varray != NULL) {
4321 int v;
4322
4323 for (v = 0; v < vcount; v++)
4324 nvlist_free(varray[v]);
4325 free(varray);
4326 }
4327 zcmd_free_nvlists(&zc);
4328 nvlist_free(zc_props);
4329 nvlist_free(newconfig);
4330 if (freelist) {
4331 nvlist_free(*newroot);
4332 *newroot = NULL;
4333 }
4334
4335 if (retval != 0)
4336 return (retval);
4337
4338 if (memory_err)
4339 return (no_memory(hdl));
4340
4341 return (0);
4342 }
4343
4344 /*
4345 * Remove the given device.
4346 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	/* dRAID distributed spares are rejected up front. */
	if (zpool_is_draid_spare(path)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dRAID spares cannot be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	/* Removing a log device requires hole-vdev support on disk. */
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
	}

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	/*
	 * Map the common ioctl failures to more specific messages;
	 * anything unrecognized falls through to the standard handler.
	 */
	switch (errno) {

	case EALREADY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "removal for this vdev is already in progress."));
		(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		break;

	case EINVAL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "invalid config; all top-level vdevs must "
		    "have the same sector size and not be raidz."));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		break;

	case EBUSY:
		/* For a busy log device, suggest replaying its ZIL. */
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Pool busy; removal may already be in progress"));
		}
		(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		break;

	case EACCES:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
			(void) zfs_error(hdl, EZFS_BUSY, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	default:
		(void) zpool_standard_error(hdl, errno, errbuf);
	}
	return (-1);
}
4424
4425 int
zpool_vdev_remove_cancel(zpool_handle_t * zhp)4426 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
4427 {
4428 zfs_cmd_t zc = {{0}};
4429 char errbuf[ERRBUFLEN];
4430 libzfs_handle_t *hdl = zhp->zpool_hdl;
4431
4432 (void) snprintf(errbuf, sizeof (errbuf),
4433 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
4434
4435 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4436 zc.zc_cookie = 1;
4437
4438 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
4439 return (0);
4440
4441 return (zpool_standard_error(hdl, errno, errbuf));
4442 }
4443
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
    uint64_t *sizep)
{
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
	    path);

	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

	/* Spares, l2cache, and log devices simply report zero. */
	if (avail_spare || l2cache || islog) {
		*sizep = 0;
		return (0);
	}

	if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "indirect size not available"));
		/*
		 * NOTE(review): zfs_error() takes EZFS_* codes elsewhere in
		 * this file; passing the errno EINVAL here looks suspicious
		 * -- confirm against the zfs_error_t definitions.
		 */
		return (zfs_error(hdl, EINVAL, errbuf));
	}
	return (0);
}
4473
4474 /*
4475 * Clear the errors for the pool, or the particular device if specified.
4476 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char errbuf[ERRBUFLEN];
	nvlist_t *tgt;
	zpool_load_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	/* Name the device if one was given, otherwise the whole pool. */
	if (path)
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, errbuf));

		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, errbuf));

		zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
	}

	/* The rewind policy (if any) travels in zc_cookie. */
	zpool_get_load_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zlp_rewind;

	zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2);
	if (rewindnvl != NULL)
		zcmd_write_src_nvlist(hdl, &zc, rewindnvl);

	/* Grow the destination buffer until the reply fits. */
	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM)
		zcmd_expand_dst_nvlist(hdl, &zc);

	/*
	 * Success, or a tolerated failure under ZPOOL_TRY_REWIND: in the
	 * rewind cases, read back the kernel's reply and report what a
	 * rewind did (or would do).
	 */
	if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, errbuf));
}
4542
4543 /*
4544 * Similar to zpool_clear(), but takes a GUID (used by fmd).
4545 */
4546 int
zpool_vdev_clear(zpool_handle_t * zhp,uint64_t guid)4547 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
4548 {
4549 zfs_cmd_t zc = {"\0"};
4550 char errbuf[ERRBUFLEN];
4551 libzfs_handle_t *hdl = zhp->zpool_hdl;
4552
4553 (void) snprintf(errbuf, sizeof (errbuf),
4554 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
4555 (u_longlong_t)guid);
4556
4557 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4558 zc.zc_guid = guid;
4559 zc.zc_cookie = ZPOOL_NO_REWIND;
4560
4561 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
4562 return (0);
4563
4564 return (zpool_standard_error(hdl, errno, errbuf));
4565 }
4566
4567 /*
4568 * Change the GUID for a pool.
4569 *
4570 * Similar to zpool_reguid(), but may take a GUID.
4571 *
4572 * If the guid argument is NULL, then no GUID is passed in the nvlist to the
4573 * ioctl().
4574 */
4575 int
zpool_set_guid(zpool_handle_t * zhp,const uint64_t * guid)4576 zpool_set_guid(zpool_handle_t *zhp, const uint64_t *guid)
4577 {
4578 char errbuf[ERRBUFLEN];
4579 libzfs_handle_t *hdl = zhp->zpool_hdl;
4580 nvlist_t *nvl = NULL;
4581 zfs_cmd_t zc = {"\0"};
4582 int error;
4583
4584 if (guid != NULL) {
4585 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
4586 return (no_memory(hdl));
4587
4588 if (nvlist_add_uint64(nvl, ZPOOL_REGUID_GUID, *guid) != 0) {
4589 nvlist_free(nvl);
4590 return (no_memory(hdl));
4591 }
4592
4593 zcmd_write_src_nvlist(hdl, &zc, nvl);
4594 }
4595
4596 (void) snprintf(errbuf, sizeof (errbuf),
4597 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
4598
4599 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4600 error = zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc);
4601 if (error) {
4602 return (zpool_standard_error(hdl, errno, errbuf));
4603 }
4604 if (guid != NULL) {
4605 zcmd_free_nvlists(&zc);
4606 nvlist_free(nvl);
4607 }
4608 return (0);
4609 }
4610
4611 /*
4612 * Change the GUID for a pool.
4613 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	/* No explicit GUID supplied; see zpool_set_guid() for semantics. */
	return (zpool_set_guid(zhp, NULL));
}
4619
4620 /*
4621 * Reopen the pool.
4622 */
4623 int
zpool_reopen_one(zpool_handle_t * zhp,void * data)4624 zpool_reopen_one(zpool_handle_t *zhp, void *data)
4625 {
4626 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4627 const char *pool_name = zpool_get_name(zhp);
4628 boolean_t *scrub_restart = data;
4629 int error;
4630
4631 error = lzc_reopen(pool_name, *scrub_restart);
4632 if (error) {
4633 return (zpool_standard_error_fmt(hdl, error,
4634 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
4635 }
4636
4637 return (0);
4638 }
4639
4640 /* call into libzfs_core to execute the sync IOCTL per pool */
4641 int
zpool_sync_one(zpool_handle_t * zhp,void * data)4642 zpool_sync_one(zpool_handle_t *zhp, void *data)
4643 {
4644 int ret;
4645 libzfs_handle_t *hdl = zpool_get_handle(zhp);
4646 const char *pool_name = zpool_get_name(zhp);
4647 boolean_t *force = data;
4648 nvlist_t *innvl = fnvlist_alloc();
4649
4650 fnvlist_add_boolean_value(innvl, "force", *force);
4651 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
4652 nvlist_free(innvl);
4653 return (zpool_standard_error_fmt(hdl, ret,
4654 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
4655 }
4656 nvlist_free(innvl);
4657
4658 return (0);
4659 }
4660
4661 #define PATH_BUF_LEN 64
4662
4663 /*
4664 * Given a vdev, return the name to display in iostat. If the vdev has a path,
4665 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
4666 * We also check if this is a whole disk, in which case we strip off the
4667 * trailing 's0' slice name.
4668 *
4669 * This routine is also responsible for identifying when disks have been
4670 * reconfigured in a new location. The kernel will have opened the device by
4671 * devid, but the path will still refer to the old location. To catch this, we
4672 * first do a path -> devid translation (which is fast for the common case). If
4673 * the devid matches, we're done. If not, we do a reverse devid -> path
4674 * translation and issue the appropriate ioctl() to update the path of the vdev.
4675 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
4676 * of these checks.
4677 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	const char *type, *tpath;
	const char *path;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN * 2];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	/* Environment variables can force the naming flags on. */
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_PATH"))
		name_flags |= VDEV_NAME_PATH;
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_GUID"))
		name_flags |= VDEV_NAME_GUID;
	if (libzfs_envvar_is_set("ZPOOL_VDEV_NAME_FOLLOW_LINKS"))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	/* Missing devices and GUID-mode naming both print the GUID. */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tpath) == 0) {
		path = tpath;

		/* Optionally resolve symlinks to the canonical device. */
		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = zfs_strip_path(path);
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
		    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		/* No path at all: fall back to the vdev type string. */
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			value = fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * If it's a dRAID device, we add parity, groups, and spares.
		 */
		if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
			uint64_t ndata, nparity, nspares, children;
			nvlist_t **child;
			uint_t width;

			verify(nvlist_lookup_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, &child, &width) == 0);
			nparity = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_NPARITY);
			ndata = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NDATA);
			nspares = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NSPARES);

			/* Older configs may lack NCHILDREN; use the width. */
			if (nvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_DRAID_NCHILDREN, &children) != 0)
				children = width;

			path = zpool_draid_name(buf, sizeof (buf), ndata,
			    nparity, nspares, children, width);
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id = fnvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_ID);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	/* Caller owns the returned string and must free() it. */
	return (zfs_strdup(hdl, path));
}
4789
4790 static int
zbookmark_mem_compare(const void * a,const void * b)4791 zbookmark_mem_compare(const void *a, const void *b)
4792 {
4793 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
4794 }
4795
4796 void
zpool_add_propname(zpool_handle_t * zhp,const char * propname)4797 zpool_add_propname(zpool_handle_t *zhp, const char *propname)
4798 {
4799 assert(zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES);
4800 zhp->zpool_propnames[zhp->zpool_n_propnames] = propname;
4801 zhp->zpool_n_propnames++;
4802 }
4803
4804 /*
4805 * Retrieve the persistent error log, uniquify the members, and return to the
4806 * caller.
4807 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zbookmark_phys_t *buf;
	uint64_t buflen = 10000; /* approx. 1MB of RAM */

	/* Nothing to do if the pool reports no errors. */
	if (fnvlist_lookup_uint64(zhp->zpool_config,
	    ZPOOL_CONFIG_ERRCOUNT) == 0)
		return (0);

	/*
	 * Retrieve the raw error list from the kernel. If it doesn't fit,
	 * allocate a larger buffer and retry.
	 */
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		buf = zfs_alloc(zhp->zpool_hdl,
		    buflen * sizeof (zbookmark_phys_t));
		zc.zc_nvlist_dst = (uintptr_t)buf;
		zc.zc_nvlist_dst_size = buflen;
		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free(buf);
			if (errno == ENOMEM) {
				/* Buffer too small; double and retry. */
				buflen *= 2;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zbookmark_phys_t *zb = buf + zc.zc_nvlist_dst_size;
	uint64_t zblen = buflen - zc.zc_nvlist_dst_size;

	qsort(zb, zblen, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (uint64_t i = 0; i < zblen; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		/* Sorted input lets adjacent-duplicate checks uniquify. */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free(buf);
	return (0);

nomem:
	free(buf);
	return (no_memory(zhp->zpool_hdl));
}
4896
4897 /*
4898 * Upgrade a ZFS pool to the latest on-disk version.
4899 */
4900 int
zpool_upgrade(zpool_handle_t * zhp,uint64_t new_version)4901 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4902 {
4903 zfs_cmd_t zc = {"\0"};
4904 libzfs_handle_t *hdl = zhp->zpool_hdl;
4905
4906 (void) strcpy(zc.zc_name, zhp->zpool_name);
4907 zc.zc_cookie = new_version;
4908
4909 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4910 return (zpool_standard_error_fmt(hdl, errno,
4911 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4912 zhp->zpool_name));
4913 return (0);
4914 }
4915
/* Join argv into 'string' (at most 'len' bytes): basename then args. */
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	(void) strlcpy(string, zfs_basename(argv[0]), len);
	for (int i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
4927
4928 int
zpool_log_history(libzfs_handle_t * hdl,const char * message)4929 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4930 {
4931 zfs_cmd_t zc = {"\0"};
4932 nvlist_t *args;
4933
4934 args = fnvlist_alloc();
4935 fnvlist_add_string(args, "message", message);
4936 zcmd_write_src_nvlist(hdl, &zc, args);
4937 int err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4938 nvlist_free(args);
4939 zcmd_free_nvlists(&zc);
4940 return (err);
4941 }
4942
4943 /*
4944 * Perform ioctl to get some command history of a pool.
4945 *
4946 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4947 * logical offset of the history buffer to start reading from.
4948 *
4949 * Upon return, 'off' is the next logical offset to read from and
4950 * 'len' is the actual amount of bytes read into 'buf'.
4951 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	/* In: buffer, capacity, and starting logical offset. */
	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		/* Map the distinguishable failures to specific messages. */
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	/* Out: bytes actually read, and the next offset to read from. */
	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
4991
4992 /*
4993 * Retrieve the command history of a pool.
4994 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
    boolean_t *eof)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char *buf;
	int buflen = 128 * 1024;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err = 0, i;
	uint64_t start = *off;

	buf = zfs_alloc(hdl, buflen);

	/* process about 1MiB a time */
	while (*off - start < 1024 * 1024) {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read) {
			*eof = B_TRUE;
			break;
		}

		/* Unpack raw bytes into nvlist records (appends to array). */
		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0) {
			zpool_standard_error_fmt(hdl, err,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name);
			break;
		}
		/* Rewind over the partial trailing record, if any. */
		*off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = zfs_alloc(hdl, buflen);
		}
	}

	free(buf);

	/* Package everything read so far, even on early EOF. */
	if (!err) {
		*nvhisp = fnvlist_alloc();
		fnvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    (const nvlist_t **)records, numrecords);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
5055
5056 /*
5057 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
5058 * If there is a new event available 'nvp' will contain a newly allocated
5059 * nvlist and 'dropped' will be set to the number of missed events since
5060 * the last call to this function. When 'nvp' is set to NULL it indicates
5061 * no new events are available. In either case the function returns 0 and
5062 * it is up to the caller to free 'nvp'. In the case of a fatal error the
5063 * function will return a non-zero value. When the function is called in
5064 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
5065 * it will not return until a new event is available.
5066 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	/* Default outputs: no event, nothing dropped. */
	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			/* In non-blocking mode ENOENT means "no event yet". */
			goto out;
		case ENOMEM:
			/* Destination buffer too small; grow it and retry. */
			zcmd_expand_dst_nvlist(hdl, &zc);
			goto retry;
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	/* zc_cookie carries the count of events missed since last call. */
	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
5117
5118 /*
5119 * Clear all events.
5120 */
5121 int
zpool_events_clear(libzfs_handle_t * hdl,int * count)5122 zpool_events_clear(libzfs_handle_t *hdl, int *count)
5123 {
5124 zfs_cmd_t zc = {"\0"};
5125
5126 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
5127 return (zpool_standard_error(hdl, errno,
5128 dgettext(TEXT_DOMAIN, "cannot clear events")));
5129
5130 if (count != NULL)
5131 *count = (int)zc.zc_cookie; /* # of events cleared */
5132
5133 return (0);
5134 }
5135
5136 /*
5137 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
5138 * the passed zevent_fd file handle. On success zero is returned,
5139 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
5140 */
5141 int
zpool_events_seek(libzfs_handle_t * hdl,uint64_t eid,int zevent_fd)5142 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
5143 {
5144 zfs_cmd_t zc = {"\0"};
5145 int error = 0;
5146
5147 zc.zc_guid = eid;
5148 zc.zc_cleanup_fd = zevent_fd;
5149
5150 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
5151 switch (errno) {
5152 case ENOENT:
5153 error = zfs_error_fmt(hdl, EZFS_NOENT,
5154 dgettext(TEXT_DOMAIN, "cannot get event"));
5155 break;
5156
5157 case ENOMEM:
5158 error = zfs_error_fmt(hdl, EZFS_NOMEM,
5159 dgettext(TEXT_DOMAIN, "cannot get event"));
5160 break;
5161
5162 default:
5163 error = zpool_standard_error_fmt(hdl, errno,
5164 dgettext(TEXT_DOMAIN, "cannot get event"));
5165 break;
5166 }
5167 }
5168
5169 return (error);
5170 }
5171
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		/* Mounted: absolute path; unmounted: "dataset:path". */
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		/* Fall back to the raw object number. */
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
5222
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	/* Honors the dataset's mounted state when formatting the path. */
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
5229
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	/* Always formats as "dataset:path", ignoring any mountpoint. */
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
5236 /*
5237 * Wait while the specified activity is in progress in the pool.
5238 */
5239 int
zpool_wait(zpool_handle_t * zhp,zpool_wait_activity_t activity)5240 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
5241 {
5242 boolean_t missing;
5243
5244 int error = zpool_wait_status(zhp, activity, &missing, NULL);
5245
5246 if (missing) {
5247 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
5248 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
5249 zhp->zpool_name);
5250 return (ENOENT);
5251 } else {
5252 return (error);
5253 }
5254 }
5255
5256 /*
5257 * Wait for the given activity and return the status of the wait (whether or not
5258 * any waiting was done) in the 'waited' parameter. Non-existent pools are
5259 * reported via the 'missing' parameter, rather than by printing an error
5260 * message. This is convenient when this function is called in a loop over a
5261 * long period of time (as it is, for example, by zpool's wait cmd). In that
5262 * scenario, a pool being exported or destroyed should be considered a normal
5263 * event, so we don't want to print an error when we find that the pool doesn't
5264 * exist.
5265 */
5266 int
zpool_wait_status(zpool_handle_t * zhp,zpool_wait_activity_t activity,boolean_t * missing,boolean_t * waited)5267 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
5268 boolean_t *missing, boolean_t *waited)
5269 {
5270 int error = lzc_wait(zhp->zpool_name, activity, waited);
5271 *missing = (error == ENOENT);
5272 if (*missing)
5273 return (0);
5274
5275 if (error != 0) {
5276 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5277 dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
5278 zhp->zpool_name);
5279 }
5280
5281 return (error);
5282 }
5283
5284 int
zpool_set_bootenv(zpool_handle_t * zhp,const nvlist_t * envmap)5285 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
5286 {
5287 int error = lzc_set_bootenv(zhp->zpool_name, envmap);
5288 if (error != 0) {
5289 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5290 dgettext(TEXT_DOMAIN,
5291 "error setting bootenv in pool '%s'"), zhp->zpool_name);
5292 }
5293
5294 return (error);
5295 }
5296
5297 int
zpool_get_bootenv(zpool_handle_t * zhp,nvlist_t ** nvlp)5298 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
5299 {
5300 nvlist_t *nvl;
5301 int error;
5302
5303 nvl = NULL;
5304 error = lzc_get_bootenv(zhp->zpool_name, &nvl);
5305 if (error != 0) {
5306 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
5307 dgettext(TEXT_DOMAIN,
5308 "error getting bootenv in pool '%s'"), zhp->zpool_name);
5309 } else {
5310 *nvlp = nvl;
5311 }
5312
5313 return (error);
5314 }
5315
5316 /*
5317 * Attempt to read and parse feature file(s) (from "compatibility" property).
5318 * Files contain zpool feature names, comma or whitespace-separated.
5319 * Comments (# character to next newline) are discarded.
5320 *
5321 * Arguments:
5322 * compatibility : string containing feature filenames
5323 * features : either NULL or pointer to array of boolean
5324 * report : either NULL or pointer to string buffer
5325 * rlen : length of "report" buffer
5326 *
5327 * compatibility is NULL (unset), "", "off", "legacy", or list of
5328 * comma-separated filenames. filenames should either be absolute,
5329 * or relative to:
5330 * 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
5331 * 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
5332 * (Unset), "" or "off" => enable all features
5333 * "legacy" => disable all features
5334 *
5335 * Any feature names read from files which match unames in spa_feature_table
5336 * will have the corresponding boolean set in the features array (if non-NULL).
5337 * If more than one feature set specified, only features present in *all* of
5338 * them will be set.
5339 *
5340 * "report" if not NULL will be populated with a suitable status message.
5341 *
5342 * Return values:
5343 * ZPOOL_COMPATIBILITY_OK : files read and parsed ok
5344 * ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
5345 * ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
5346 * ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
5347 * ZPOOL_COMPATIBILITY_NOFILES : no feature files found
5348 */
5349 zpool_compat_status_t
zpool_load_compat(const char * compat,boolean_t * features,char * report,size_t rlen)5350 zpool_load_compat(const char *compat, boolean_t *features, char *report,
5351 size_t rlen)
5352 {
5353 int sdirfd, ddirfd, featfd;
5354 struct stat fs;
5355 char *fc;
5356 char *ps, *ls, *ws;
5357 char *file, *line, *word;
5358
5359 char l_compat[ZFS_MAXPROPLEN];
5360
5361 boolean_t ret_nofiles = B_TRUE;
5362 boolean_t ret_badfile = B_FALSE;
5363 boolean_t ret_badtoken = B_FALSE;
5364 boolean_t ret_warntoken = B_FALSE;
5365
5366 /* special cases (unset), "" and "off" => enable all features */
5367 if (compat == NULL || compat[0] == '\0' ||
5368 strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
5369 if (features != NULL) {
5370 for (uint_t i = 0; i < SPA_FEATURES; i++)
5371 features[i] = B_TRUE;
5372 }
5373 if (report != NULL)
5374 strlcpy(report, gettext("all features enabled"), rlen);
5375 return (ZPOOL_COMPATIBILITY_OK);
5376 }
5377
5378 /* Final special case "legacy" => disable all features */
5379 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
5380 if (features != NULL)
5381 for (uint_t i = 0; i < SPA_FEATURES; i++)
5382 features[i] = B_FALSE;
5383 if (report != NULL)
5384 strlcpy(report, gettext("all features disabled"), rlen);
5385 return (ZPOOL_COMPATIBILITY_OK);
5386 }
5387
5388 /*
5389 * Start with all true; will be ANDed with results from each file
5390 */
5391 if (features != NULL)
5392 for (uint_t i = 0; i < SPA_FEATURES; i++)
5393 features[i] = B_TRUE;
5394
5395 char err_badfile[ZFS_MAXPROPLEN] = "";
5396 char err_badtoken[ZFS_MAXPROPLEN] = "";
5397
5398 /*
5399 * We ignore errors from the directory open()
5400 * as they're only needed if the filename is relative
5401 * which will be checked during the openat().
5402 */
5403
5404 /* O_PATH safer than O_RDONLY if system allows it */
5405 #if defined(O_PATH)
5406 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
5407 #else
5408 #define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
5409 #endif
5410
5411 sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
5412 ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
5413
5414 (void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
5415
5416 for (file = strtok_r(l_compat, ",", &ps);
5417 file != NULL;
5418 file = strtok_r(NULL, ",", &ps)) {
5419
5420 boolean_t l_features[SPA_FEATURES];
5421
5422 enum { Z_SYSCONF, Z_DATA } source;
5423
5424 /* try sysconfdir first, then datadir */
5425 source = Z_SYSCONF;
5426 if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
5427 featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
5428 source = Z_DATA;
5429 }
5430
5431 /* File readable and correct size? */
5432 if (featfd < 0 ||
5433 fstat(featfd, &fs) < 0 ||
5434 fs.st_size < 1 ||
5435 fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
5436 (void) close(featfd);
5437 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
5438 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
5439 ret_badfile = B_TRUE;
5440 continue;
5441 }
5442
5443 /* Prefault the file if system allows */
5444 #if defined(MAP_POPULATE)
5445 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
5446 #elif defined(MAP_PREFAULT_READ)
5447 #define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
5448 #else
5449 #define ZC_MMAP_FLAGS (MAP_PRIVATE)
5450 #endif
5451
5452 /* private mmap() so we can strtok safely */
5453 fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
5454 ZC_MMAP_FLAGS, featfd, 0);
5455 (void) close(featfd);
5456
5457 /* map ok, and last character == newline? */
5458 if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
5459 (void) munmap((void *) fc, fs.st_size);
5460 strlcat(err_badfile, file, ZFS_MAXPROPLEN);
5461 strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
5462 ret_badfile = B_TRUE;
5463 continue;
5464 }
5465
5466 ret_nofiles = B_FALSE;
5467
5468 for (uint_t i = 0; i < SPA_FEATURES; i++)
5469 l_features[i] = B_FALSE;
5470
5471 /* replace final newline with NULL to ensure string ends */
5472 fc[fs.st_size - 1] = '\0';
5473
5474 for (line = strtok_r(fc, "\n", &ls);
5475 line != NULL;
5476 line = strtok_r(NULL, "\n", &ls)) {
5477 /* discard comments */
5478 char *r = strchr(line, '#');
5479 if (r != NULL)
5480 *r = '\0';
5481
5482 for (word = strtok_r(line, ", \t", &ws);
5483 word != NULL;
5484 word = strtok_r(NULL, ", \t", &ws)) {
5485 /* Find matching feature name */
5486 uint_t f;
5487 for (f = 0; f < SPA_FEATURES; f++) {
5488 zfeature_info_t *fi =
5489 &spa_feature_table[f];
5490 if (strcmp(word, fi->fi_uname) == 0) {
5491 l_features[f] = B_TRUE;
5492 break;
5493 }
5494 }
5495 if (f < SPA_FEATURES)
5496 continue;
5497
5498 /* found an unrecognized word */
5499 /* lightly sanitize it */
5500 if (strlen(word) > 32)
5501 word[32] = '\0';
5502 for (char *c = word; *c != '\0'; c++)
5503 if (!isprint(*c))
5504 *c = '?';
5505
5506 strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
5507 strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
5508 if (source == Z_SYSCONF)
5509 ret_badtoken = B_TRUE;
5510 else
5511 ret_warntoken = B_TRUE;
5512 }
5513 }
5514 (void) munmap((void *) fc, fs.st_size);
5515
5516 if (features != NULL)
5517 for (uint_t i = 0; i < SPA_FEATURES; i++)
5518 features[i] &= l_features[i];
5519 }
5520 (void) close(sdirfd);
5521 (void) close(ddirfd);
5522
5523 /* Return the most serious error */
5524 if (ret_badfile) {
5525 if (report != NULL)
5526 snprintf(report, rlen, gettext("could not read/"
5527 "parse feature file(s): %s"), err_badfile);
5528 return (ZPOOL_COMPATIBILITY_BADFILE);
5529 }
5530 if (ret_nofiles) {
5531 if (report != NULL)
5532 strlcpy(report,
5533 gettext("no valid compatibility files specified"),
5534 rlen);
5535 return (ZPOOL_COMPATIBILITY_NOFILES);
5536 }
5537 if (ret_badtoken) {
5538 if (report != NULL)
5539 snprintf(report, rlen, gettext("invalid feature "
5540 "name(s) in local compatibility files: %s"),
5541 err_badtoken);
5542 return (ZPOOL_COMPATIBILITY_BADTOKEN);
5543 }
5544 if (ret_warntoken) {
5545 if (report != NULL)
5546 snprintf(report, rlen, gettext("unrecognized feature "
5547 "name(s) in distribution compatibility files: %s"),
5548 err_badtoken);
5549 return (ZPOOL_COMPATIBILITY_WARNTOKEN);
5550 }
5551 if (report != NULL)
5552 strlcpy(report, gettext("compatibility set ok"), rlen);
5553 return (ZPOOL_COMPATIBILITY_OK);
5554 }
5555
5556 static int
zpool_vdev_guid(zpool_handle_t * zhp,const char * vdevname,uint64_t * vdev_guid)5557 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
5558 {
5559 nvlist_t *tgt;
5560 boolean_t avail_spare, l2cache;
5561
5562 verify(zhp != NULL);
5563 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
5564 char errbuf[ERRBUFLEN];
5565 (void) snprintf(errbuf, sizeof (errbuf),
5566 dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
5567 return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
5568 }
5569
5570 if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
5571 NULL)) == NULL) {
5572 char errbuf[ERRBUFLEN];
5573 (void) snprintf(errbuf, sizeof (errbuf),
5574 dgettext(TEXT_DOMAIN, "can not find %s in %s"),
5575 vdevname, zhp->zpool_name);
5576 return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
5577 }
5578
5579 *vdev_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
5580 return (0);
5581 }
5582
5583 /*
5584 * Get a vdev property value for 'prop' and return the value in
5585 * a pre-allocated buffer.
5586 */
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
    char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
	nvlist_t *nv;
	const char *strval;
	uint64_t intval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (prop == VDEV_PROP_USERPROP) {
		/* user property, prop_name must contain the property name */
		assert(prop_name != NULL);
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			/* user prop not found */
			src = ZPROP_SRC_DEFAULT;
			strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		if (srctype)
			*srctype = src;
		return (0);
	}

	/* For native properties, derive the name from 'prop' if not given. */
	if (prop_name == NULL)
		prop_name = (char *)vdev_prop_to_name(prop);

	switch (vdev_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
		} else {
			/* not present: fall back to default, or "-" */
			src = ZPROP_SRC_DEFAULT;
			if ((strval = vdev_prop_default_string(prop)) == NULL)
				strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		break;

	case PROP_TYPE_NUMBER:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}

		/* Per-property formatting of the numeric value. */
		switch (prop) {
		/* sizes and counters: raw with 'literal', else humanized */
		case VDEV_PROP_ASIZE:
		case VDEV_PROP_PSIZE:
		case VDEV_PROP_SIZE:
		case VDEV_PROP_BOOTSIZE:
		case VDEV_PROP_ALLOCATED:
		case VDEV_PROP_FREE:
		case VDEV_PROP_READ_ERRORS:
		case VDEV_PROP_WRITE_ERRORS:
		case VDEV_PROP_CHECKSUM_ERRORS:
		case VDEV_PROP_INITIALIZE_ERRORS:
		case VDEV_PROP_TRIM_ERRORS:
		case VDEV_PROP_SLOW_IOS:
		case VDEV_PROP_OPS_NULL:
		case VDEV_PROP_OPS_READ:
		case VDEV_PROP_OPS_WRITE:
		case VDEV_PROP_OPS_FREE:
		case VDEV_PROP_OPS_CLAIM:
		case VDEV_PROP_OPS_TRIM:
		case VDEV_PROP_BYTES_NULL:
		case VDEV_PROP_BYTES_READ:
		case VDEV_PROP_BYTES_WRITE:
		case VDEV_PROP_BYTES_FREE:
		case VDEV_PROP_BYTES_CLAIM:
		case VDEV_PROP_BYTES_TRIM:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_EXPANDSZ:
			/* zero expansion space is displayed as "-" */
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_CAPACITY:
			/* percentage; '%' suffix unless literal */
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		/* tunables where UINT64_MAX means "not set" => "-" */
		case VDEV_PROP_CHECKSUM_N:
		case VDEV_PROP_CHECKSUM_T:
		case VDEV_PROP_IO_N:
		case VDEV_PROP_IO_T:
		case VDEV_PROP_SLOW_IO_N:
		case VDEV_PROP_SLOW_IO_T:
		case VDEV_PROP_FDOMAIN:
		case VDEV_PROP_FGROUP:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_FRAGMENTATION:
			/* UINT64_MAX => unknown, shown as "-" */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_STATE:
			/* numeric state if literal, else symbolic name */
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) strlcpy(buf, zpool_state_to_name(intval,
				    VDEV_AUX_NONE), len);
			}
			break;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			src = fnvlist_lookup_uint64(nv, ZPROP_SOURCE);
			intval = fnvlist_lookup_uint64(nv, ZPROP_VALUE);
		} else {
			/* 'trim_support' only valid for leaf vdevs */
			if (prop == VDEV_PROP_TRIM_SUPPORT) {
				(void) strlcpy(buf, "-", len);
				break;
			}
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
			/* Only use if provided by the RAIDZ VDEV above */
			if (prop == VDEV_PROP_RAIDZ_EXPANDING)
				return (ENOENT);
			if (prop == VDEV_PROP_SIT_OUT)
				return (ENOENT);
		}
		/* map the index value back to its display string */
		if (vdev_prop_index_to_string(prop, intval,
		    (const char **)&strval) != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		/* unreachable unless a new property type is added */
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
5760
5761 /*
5762 * Get a vdev property value for 'prop_name' and return the value in
5763 * a pre-allocated buffer.
5764 */
5765 int
zpool_get_vdev_prop(zpool_handle_t * zhp,const char * vdevname,vdev_prop_t prop,char * prop_name,char * buf,size_t len,zprop_source_t * srctype,boolean_t literal)5766 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
5767 char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
5768 boolean_t literal)
5769 {
5770 nvlist_t *reqnvl, *reqprops;
5771 nvlist_t *retprops = NULL;
5772 uint64_t vdev_guid = 0;
5773 int ret;
5774
5775 if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5776 return (ret);
5777
5778 if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
5779 return (no_memory(zhp->zpool_hdl));
5780 if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
5781 return (no_memory(zhp->zpool_hdl));
5782
5783 fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5784
5785 if (prop != VDEV_PROP_USERPROP) {
5786 /* prop_name overrides prop value */
5787 if (prop_name != NULL)
5788 prop = vdev_name_to_prop(prop_name);
5789 else
5790 prop_name = (char *)vdev_prop_to_name(prop);
5791 assert(prop < VDEV_NUM_PROPS);
5792 }
5793
5794 assert(prop_name != NULL);
5795 if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
5796 nvlist_free(reqnvl);
5797 nvlist_free(reqprops);
5798 return (no_memory(zhp->zpool_hdl));
5799 }
5800
5801 fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
5802
5803 ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
5804
5805 if (ret == 0) {
5806 ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
5807 len, srctype, literal);
5808 } else {
5809 char errbuf[ERRBUFLEN];
5810 (void) snprintf(errbuf, sizeof (errbuf),
5811 dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
5812 " %s in %s"), prop_name, vdevname, zhp->zpool_name);
5813 (void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
5814 }
5815
5816 nvlist_free(reqnvl);
5817 nvlist_free(reqprops);
5818 nvlist_free(retprops);
5819
5820 return (ret);
5821 }
5822
5823 /*
5824 * Get all vdev properties
5825 */
5826 int
int
zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
    nvlist_t **outnvl)
{
	nvlist_t *nvl = NULL;
	uint64_t vdev_guid = 0;
	int ret;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);

	ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);

	nvlist_free(nvl);

	if (ret) {
		char errbuf[ERRBUFLEN];
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
		    " %s in %s"), vdevname, zhp->zpool_name);
		/*
		 * Use the lzc return code, not errno: nvlist_free() runs
		 * between the ioctl and this point and may clobber errno.
		 */
		(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
	}

	return (ret);
}
5856
5857 /*
5858 * Set vdev property
5859 */
5860 int
int
zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
    const char *propname, const char *propval)
{
	int ret;
	nvlist_t *nvl = NULL;
	nvlist_t *outnvl = NULL;
	nvlist_t *props;
	nvlist_t *realprops;
	prop_flags_t flags = { 0 };
	uint64_t version;
	uint64_t vdev_guid;

	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
		return (ret);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));
	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
		/* don't leak 'nvl' when the second allocation fails */
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);

	if (nvlist_add_string(props, propname, propval) != 0) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	char errbuf[ERRBUFLEN];
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
	    propname, vdevname, zhp->zpool_name);

	/* validate the property name/value against the pool version */
	flags.vdevprop = 1;
	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
		nvlist_free(props);
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(props);
	props = realprops;

	fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);

	ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);

	nvlist_free(props);
	nvlist_free(nvl);
	nvlist_free(outnvl);

	if (ret) {
		/*
		 * Use the lzc return code, not errno: the nvlist_free()
		 * calls above may clobber errno before it is read here.
		 */
		if (ret == ENOTSUP) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "property not supported for this vdev"));
			(void) zfs_error(zhp->zpool_hdl, EZFS_PROPTYPE, errbuf);
		} else {
			(void) zpool_standard_error(zhp->zpool_hdl, ret,
			    errbuf);
		}
	}

	return (ret);
}
5926
5927 /*
5928 * Prune older entries from the DDT to reclaim space under the quota
5929 */
5930 int
zpool_ddt_prune(zpool_handle_t * zhp,zpool_ddt_prune_unit_t unit,uint64_t amount)5931 zpool_ddt_prune(zpool_handle_t *zhp, zpool_ddt_prune_unit_t unit,
5932 uint64_t amount)
5933 {
5934 int error = lzc_ddt_prune(zhp->zpool_name, unit, amount);
5935 if (error != 0) {
5936 libzfs_handle_t *hdl = zhp->zpool_hdl;
5937 char errbuf[ERRBUFLEN];
5938
5939 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
5940 "cannot prune dedup table on '%s'"), zhp->zpool_name);
5941
5942 if (error == EALREADY) {
5943 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
5944 "a prune operation is already in progress"));
5945 (void) zfs_error(hdl, EZFS_BUSY, errbuf);
5946 } else {
5947 (void) zpool_standard_error(hdl, errno, errbuf);
5948 }
5949 return (-1);
5950 }
5951
5952 return (0);
5953 }
5954