1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Portions Copyright 2011 Martin Matuska
25 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
26 * Copyright (c) 2012 Pawel Jakub Dawidek
27 * Copyright (c) 2014, 2016 Joyent, Inc. All rights reserved.
28 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
29 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
30 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
31 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
32 * Copyright (c) 2013 Steven Hartland. All rights reserved.
33 * Copyright (c) 2014 Integros [integros.com]
34 * Copyright 2016 Toomas Soome <tsoome@me.com>
35 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
36 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
37 * Copyright 2017 RackTop Systems.
38 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
39 * Copyright (c) 2019 Datto Inc.
40 * Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
41 * Copyright (c) 2019, 2021, 2023, 2024, Klara Inc.
42 * Copyright (c) 2019, Allan Jude
43 * Copyright 2024 Oxide Computer Company
44 */
45
46 /*
47 * ZFS ioctls.
48 *
49 * This file handles the ioctls to /dev/zfs, used for configuring ZFS storage
50 * pools and filesystems, e.g. with /sbin/zfs and /sbin/zpool.
51 *
52 * There are two ways that we handle ioctls: the legacy way where almost
53 * all of the logic is in the ioctl callback, and the new way where most
54 * of the marshalling is handled in the common entry point, zfsdev_ioctl().
55 *
56 * Non-legacy ioctls should be registered by calling
57 * zfs_ioctl_register() from zfs_ioctl_init(). The ioctl is invoked
58 * from userland by lzc_ioctl().
59 *
60 * The registration arguments are as follows:
61 *
62 * const char *name
63 * The name of the ioctl. This is used for history logging. If the
64 * ioctl returns successfully (the callback returns 0), and allow_log
65 * is true, then a history log entry will be recorded with the input &
66 * output nvlists. The log entry can be printed with "zpool history -i".
67 *
68 * zfs_ioc_t ioc
69 * The ioctl request number, which userland will pass to ioctl(2).
70 * We want newer versions of libzfs and libzfs_core to run against
71 * existing zfs kernel modules (i.e. a deferred reboot after an update).
72 * Therefore the ioctl numbers cannot change from release to release.
73 *
74 * zfs_secpolicy_func_t *secpolicy
75 * This function will be called before the zfs_ioc_func_t, to
76 * determine if this operation is permitted. It should return EPERM
77 * on failure, and 0 on success. Checks include determining if the
78 * dataset is visible in this zone, and if the user has either all
79 * zfs privileges in the zone (SYS_MOUNT), or has been granted permission
80 * to do this operation on this dataset with "zfs allow".
81 *
82 * zfs_ioc_namecheck_t namecheck
83 * This specifies what to expect in the zfs_cmd_t:zc_name -- a pool
84 * name, a dataset name, or nothing. If the name is not well-formed,
85 * the ioctl will fail and the callback will not be called.
86 * Therefore, the callback can assume that the name is well-formed
87 * (e.g. is null-terminated, doesn't have more than one '@' character,
88 * doesn't have invalid characters).
89 *
90 * zfs_ioc_poolcheck_t pool_check
91 * This specifies requirements on the pool state. If the pool does
92 * not meet them (is suspended or is readonly), the ioctl will fail
93 * and the callback will not be called. If any checks are specified
94 * (i.e. it is not POOL_CHECK_NONE), namecheck must not be NO_NAME.
95 * Multiple checks can be or-ed together (e.g. POOL_CHECK_SUSPENDED |
96 * POOL_CHECK_READONLY).
97 *
98 * zfs_ioc_key_t *nvl_keys
99 * The list of expected/allowable innvl input keys. This list is used
100 * to validate the nvlist input to the ioctl.
101 *
102 * boolean_t smush_outnvlist
103 * If smush_outnvlist is true, then the output is presumed to be a
104 * list of errors, and it will be "smushed" down to fit into the
105 * caller's buffer, by removing some entries and replacing them with a
106 * single "N_MORE_ERRORS" entry indicating how many were removed. See
107 * nvlist_smush() for details. If smush_outnvlist is false, and the
108 * outnvlist does not fit into the userland-provided buffer, then the
109 * ioctl will fail with ENOMEM.
110 *
111 * zfs_ioc_func_t *func
112 * The callback function that will perform the operation.
113 *
114 * The callback should return 0 on success, or an error number on
115 * failure. If the function fails, the userland ioctl will return -1,
116 * and errno will be set to the callback's return value. The callback
117 * will be called with the following arguments:
118 *
119 * const char *name
120 * The name of the pool or dataset to operate on, from
121 * zfs_cmd_t:zc_name. The 'namecheck' argument specifies the
122 * expected type (pool, dataset, or none).
123 *
124 * nvlist_t *innvl
125 * The input nvlist, deserialized from zfs_cmd_t:zc_nvlist_src. Or
126 * NULL if no input nvlist was provided. Changes to this nvlist are
127 * ignored. If the input nvlist could not be deserialized, the
128 * ioctl will fail and the callback will not be called.
129 *
130 * nvlist_t *outnvl
131 * The output nvlist, initially empty. The callback can fill it in,
132 * and it will be returned to userland by serializing it into
133 * zfs_cmd_t:zc_nvlist_dst. If it is non-empty, and serialization
134 * fails (e.g. because the caller didn't supply a large enough
135 * buffer), then the overall ioctl will fail. See the
136 * 'smush_nvlist' argument above for additional behaviors.
137 *
138 * There are two typical uses of the output nvlist:
139 * - To return state, e.g. property values. In this case,
140 * smush_outnvlist should be false. If the buffer was not large
141 * enough, the caller will reallocate a larger buffer and try
142 * the ioctl again.
143 *
144 * - To return multiple errors from an ioctl which makes on-disk
145 * changes. In this case, smush_outnvlist should be true.
146 * Ioctls which make on-disk modifications should generally not
147 * use the outnvl if they succeed, because the caller can not
148 * distinguish between the operation failing, and
149 * deserialization failing.
150 *
151 * IOCTL Interface Errors
152 *
153 * The following ioctl input errors can be returned:
154 * ZFS_ERR_IOC_CMD_UNAVAIL the ioctl number is not supported by kernel
155 * ZFS_ERR_IOC_ARG_UNAVAIL an input argument is not supported by kernel
156 * ZFS_ERR_IOC_ARG_REQUIRED a required input argument is missing
157 * ZFS_ERR_IOC_ARG_BADTYPE an input argument has an invalid type
158 */
159
160 #include <sys/types.h>
161 #include <sys/param.h>
162 #include <sys/errno.h>
163 #include <sys/file.h>
164 #include <sys/kmem.h>
165 #include <sys/cmn_err.h>
166 #include <sys/stat.h>
167 #include <sys/zfs_ioctl.h>
168 #include <sys/zfs_quota.h>
169 #include <sys/zfs_vfsops.h>
170 #include <sys/zfs_znode.h>
171 #include <sys/zap.h>
172 #include <sys/spa.h>
173 #include <sys/spa_impl.h>
174 #include <sys/vdev.h>
175 #include <sys/vdev_impl.h>
176 #include <sys/dmu.h>
177 #include <sys/dsl_dir.h>
178 #include <sys/dsl_dataset.h>
179 #include <sys/dsl_prop.h>
180 #include <sys/dsl_deleg.h>
181 #include <sys/dmu_objset.h>
182 #include <sys/dmu_impl.h>
183 #include <sys/dmu_redact.h>
184 #include <sys/dmu_tx.h>
185 #include <sys/sunddi.h>
186 #include <sys/policy.h>
187 #include <sys/zone.h>
188 #include <sys/nvpair.h>
189 #include <sys/pathname.h>
190 #include <sys/fs/zfs.h>
191 #include <sys/zfs_ctldir.h>
192 #include <sys/zfs_dir.h>
193 #include <sys/zfs_onexit.h>
194 #include <sys/zvol.h>
195 #include <sys/dsl_scan.h>
196 #include <sys/fm/util.h>
197 #include <sys/dsl_crypt.h>
198 #include <sys/rrwlock.h>
199 #include <sys/zfs_file.h>
200
201 #include <sys/dmu_recv.h>
202 #include <sys/dmu_send.h>
203 #include <sys/dmu_recv.h>
204 #include <sys/dsl_destroy.h>
205 #include <sys/dsl_bookmark.h>
206 #include <sys/dsl_userhold.h>
207 #include <sys/zfeature.h>
208 #include <sys/zcp.h>
209 #include <sys/zio_checksum.h>
210 #include <sys/vdev_removal.h>
211 #include <sys/vdev_impl.h>
212 #include <sys/vdev_initialize.h>
213 #include <sys/vdev_trim.h>
214
215 #include "zfs_namecheck.h"
216 #include "zfs_prop.h"
217 #include "zfs_deleg.h"
218 #include "zfs_comutil.h"
219
220 #include <sys/lua/lua.h>
221 #include <sys/lua/lauxlib.h>
222 #include <sys/zfs_ioctl_impl.h>
223
224 kmutex_t zfsdev_state_lock;
225 static zfsdev_state_t zfsdev_state_listhead;
226
227 /*
228 * Limit maximum nvlist size. We don't want users passing in insane values
229 * for zc->zc_nvlist_src_size, since we will need to allocate that much memory.
230 * Defaults to 0=auto which is handled by platform code.
231 */
232 uint64_t zfs_max_nvlist_src_size = 0;
233
234 /*
235 * When logging the output nvlist of an ioctl in the on-disk history, limit
236 * the logged size to this many bytes. This must be less than DMU_MAX_ACCESS.
237 * This applies primarily to zfs_ioc_channel_program().
238 */
239 static uint64_t zfs_history_output_max = 1024 * 1024;
240
241 uint_t zfs_allow_log_key;
242
243 /* DATA_TYPE_ANY is used when zkey_type can vary. */
244 #define DATA_TYPE_ANY DATA_TYPE_UNKNOWN
245
246 typedef struct zfs_ioc_vec {
247 zfs_ioc_legacy_func_t *zvec_legacy_func;
248 zfs_ioc_func_t *zvec_func;
249 zfs_secpolicy_func_t *zvec_secpolicy;
250 zfs_ioc_namecheck_t zvec_namecheck;
251 boolean_t zvec_allow_log;
252 zfs_ioc_poolcheck_t zvec_pool_check;
253 boolean_t zvec_smush_outnvlist;
254 const char *zvec_name;
255 const zfs_ioc_key_t *zvec_nvl_keys;
256 size_t zvec_nvl_key_count;
257 } zfs_ioc_vec_t;
258
259 /* This array is indexed by zfs_userquota_prop_t */
260 static const char *userquota_perms[] = {
261 ZFS_DELEG_PERM_USERUSED,
262 ZFS_DELEG_PERM_USERQUOTA,
263 ZFS_DELEG_PERM_GROUPUSED,
264 ZFS_DELEG_PERM_GROUPQUOTA,
265 ZFS_DELEG_PERM_USEROBJUSED,
266 ZFS_DELEG_PERM_USEROBJQUOTA,
267 ZFS_DELEG_PERM_GROUPOBJUSED,
268 ZFS_DELEG_PERM_GROUPOBJQUOTA,
269 ZFS_DELEG_PERM_PROJECTUSED,
270 ZFS_DELEG_PERM_PROJECTQUOTA,
271 ZFS_DELEG_PERM_PROJECTOBJUSED,
272 ZFS_DELEG_PERM_PROJECTOBJQUOTA,
273 };
274
275 static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc);
276 static int zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc);
277 static int zfs_check_settable(const char *name, nvpair_t *property,
278 cred_t *cr);
279 static int zfs_check_clearable(const char *dataset, nvlist_t *props,
280 nvlist_t **errors);
281 static int zfs_fill_zplprops_root(uint64_t, nvlist_t *, nvlist_t *,
282 boolean_t *);
283 int zfs_set_prop_nvlist(const char *, zprop_source_t, nvlist_t *, nvlist_t *);
284 static int get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp);
285
286 static void
history_str_free(char * buf)287 history_str_free(char *buf)
288 {
289 kmem_free(buf, HIS_MAX_RECORD_LEN);
290 }
291
292 static char *
history_str_get(zfs_cmd_t * zc)293 history_str_get(zfs_cmd_t *zc)
294 {
295 char *buf;
296
297 if (zc->zc_history == 0)
298 return (NULL);
299
300 buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
301 if (copyinstr((void *)(uintptr_t)zc->zc_history,
302 buf, HIS_MAX_RECORD_LEN, NULL) != 0) {
303 history_str_free(buf);
304 return (NULL);
305 }
306
307 buf[HIS_MAX_RECORD_LEN -1] = '\0';
308
309 return (buf);
310 }
311
312 /*
313 * Return non-zero if the spa version is less than requested version.
314 */
315 static int
zfs_earlier_version(const char * name,int version)316 zfs_earlier_version(const char *name, int version)
317 {
318 spa_t *spa;
319
320 if (spa_open(name, &spa, FTAG) == 0) {
321 if (spa_version(spa) < version) {
322 spa_close(spa, FTAG);
323 return (1);
324 }
325 spa_close(spa, FTAG);
326 }
327 return (0);
328 }
329
330 /*
331 * Return TRUE if the ZPL version is less than requested version.
332 */
333 static boolean_t
zpl_earlier_version(const char * name,int version)334 zpl_earlier_version(const char *name, int version)
335 {
336 objset_t *os;
337 boolean_t rc = B_TRUE;
338
339 if (dmu_objset_hold(name, FTAG, &os) == 0) {
340 uint64_t zplversion;
341
342 if (dmu_objset_type(os) != DMU_OST_ZFS) {
343 dmu_objset_rele(os, FTAG);
344 return (B_TRUE);
345 }
346 /* XXX reading from non-owned objset */
347 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplversion) == 0)
348 rc = zplversion < version;
349 dmu_objset_rele(os, FTAG);
350 }
351 return (rc);
352 }
353
354 static void
zfs_log_history(zfs_cmd_t * zc)355 zfs_log_history(zfs_cmd_t *zc)
356 {
357 spa_t *spa;
358 char *buf;
359
360 if ((buf = history_str_get(zc)) == NULL)
361 return;
362
363 if (spa_open(zc->zc_name, &spa, FTAG) == 0) {
364 if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY)
365 (void) spa_history_log(spa, buf);
366 spa_close(spa, FTAG);
367 }
368 history_str_free(buf);
369 }
370
371 /*
372 * Policy for top-level read operations (list pools). Requires no privileges,
373 * and can be used in the local zone, as there is no associated dataset.
374 */
375 static int
zfs_secpolicy_none(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)376 zfs_secpolicy_none(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
377 {
378 (void) zc, (void) innvl, (void) cr;
379 return (0);
380 }
381
382 /*
383 * Policy for dataset read operations (list children, get statistics). Requires
384 * no privileges, but must be visible in the local zone.
385 */
386 static int
zfs_secpolicy_read(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)387 zfs_secpolicy_read(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
388 {
389 (void) innvl, (void) cr;
390 if (INGLOBALZONE(curproc) ||
391 zone_dataset_visible(zc->zc_name, NULL))
392 return (0);
393
394 return (SET_ERROR(ENOENT));
395 }
396
397 static int
zfs_dozonecheck_impl(const char * dataset,uint64_t zoned,cred_t * cr)398 zfs_dozonecheck_impl(const char *dataset, uint64_t zoned, cred_t *cr)
399 {
400 int writable = 1;
401
402 /*
403 * The dataset must be visible by this zone -- check this first
404 * so they don't see EPERM on something they shouldn't know about.
405 */
406 if (!INGLOBALZONE(curproc) &&
407 !zone_dataset_visible(dataset, &writable))
408 return (SET_ERROR(ENOENT));
409
410 if (INGLOBALZONE(curproc)) {
411 /*
412 * If the fs is zoned, only root can access it from the
413 * global zone.
414 */
415 if (secpolicy_zfs(cr) && zoned)
416 return (SET_ERROR(EPERM));
417 } else {
418 /*
419 * If we are in a local zone, the 'zoned' property must be set.
420 */
421 if (!zoned)
422 return (SET_ERROR(EPERM));
423
424 /* must be writable by this zone */
425 if (!writable)
426 return (SET_ERROR(EPERM));
427 }
428 return (0);
429 }
430
431 static int
zfs_dozonecheck(const char * dataset,cred_t * cr)432 zfs_dozonecheck(const char *dataset, cred_t *cr)
433 {
434 uint64_t zoned;
435
436 if (dsl_prop_get_integer(dataset, zfs_prop_to_name(ZFS_PROP_ZONED),
437 &zoned, NULL))
438 return (SET_ERROR(ENOENT));
439
440 return (zfs_dozonecheck_impl(dataset, zoned, cr));
441 }
442
443 static int
zfs_dozonecheck_ds(const char * dataset,dsl_dataset_t * ds,cred_t * cr)444 zfs_dozonecheck_ds(const char *dataset, dsl_dataset_t *ds, cred_t *cr)
445 {
446 uint64_t zoned;
447
448 if (dsl_prop_get_int_ds(ds, zfs_prop_to_name(ZFS_PROP_ZONED), &zoned))
449 return (SET_ERROR(ENOENT));
450
451 return (zfs_dozonecheck_impl(dataset, zoned, cr));
452 }
453
454 static int
zfs_secpolicy_write_perms_ds(const char * name,dsl_dataset_t * ds,const char * perm,cred_t * cr)455 zfs_secpolicy_write_perms_ds(const char *name, dsl_dataset_t *ds,
456 const char *perm, cred_t *cr)
457 {
458 int error;
459
460 error = zfs_dozonecheck_ds(name, ds, cr);
461 if (error == 0) {
462 error = secpolicy_zfs(cr);
463 if (error != 0)
464 error = dsl_deleg_access_impl(ds, perm, cr);
465 }
466 return (error);
467 }
468
469 static int
zfs_secpolicy_write_perms(const char * name,const char * perm,cred_t * cr)470 zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr)
471 {
472 int error;
473 dsl_dataset_t *ds;
474 dsl_pool_t *dp;
475
476 /*
477 * First do a quick check for root in the global zone, which
478 * is allowed to do all write_perms. This ensures that zfs_ioc_*
479 * will get to handle nonexistent datasets.
480 */
481 if (INGLOBALZONE(curproc) && secpolicy_zfs(cr) == 0)
482 return (0);
483
484 error = dsl_pool_hold(name, FTAG, &dp);
485 if (error != 0)
486 return (error);
487
488 error = dsl_dataset_hold(dp, name, FTAG, &ds);
489 if (error != 0) {
490 dsl_pool_rele(dp, FTAG);
491 return (error);
492 }
493
494 error = zfs_secpolicy_write_perms_ds(name, ds, perm, cr);
495
496 dsl_dataset_rele(ds, FTAG);
497 dsl_pool_rele(dp, FTAG);
498 return (error);
499 }
500
501 /*
502 * Policy for setting the security label property.
503 *
504 * Returns 0 for success, non-zero for access and other errors.
505 */
506 static int
zfs_set_slabel_policy(const char * name,const char * strval,cred_t * cr)507 zfs_set_slabel_policy(const char *name, const char *strval, cred_t *cr)
508 {
509 #ifdef HAVE_MLSLABEL
510 char ds_hexsl[MAXNAMELEN];
511 bslabel_t ds_sl, new_sl;
512 boolean_t new_default = FALSE;
513 uint64_t zoned;
514 int needed_priv = -1;
515 int error;
516
517 /* First get the existing dataset label. */
518 error = dsl_prop_get(name, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
519 1, sizeof (ds_hexsl), &ds_hexsl, NULL);
520 if (error != 0)
521 return (SET_ERROR(EPERM));
522
523 if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
524 new_default = TRUE;
525
526 /* The label must be translatable */
527 if (!new_default && (hexstr_to_label(strval, &new_sl) != 0))
528 return (SET_ERROR(EINVAL));
529
530 /*
531 * In a non-global zone, disallow attempts to set a label that
532 * doesn't match that of the zone; otherwise no other checks
533 * are needed.
534 */
535 if (!INGLOBALZONE(curproc)) {
536 if (new_default || !blequal(&new_sl, CR_SL(CRED())))
537 return (SET_ERROR(EPERM));
538 return (0);
539 }
540
541 /*
542 * For global-zone datasets (i.e., those whose zoned property is
543 * "off", verify that the specified new label is valid for the
544 * global zone.
545 */
546 if (dsl_prop_get_integer(name,
547 zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL))
548 return (SET_ERROR(EPERM));
549 if (!zoned) {
550 if (zfs_check_global_label(name, strval) != 0)
551 return (SET_ERROR(EPERM));
552 }
553
554 /*
555 * If the existing dataset label is nondefault, check if the
556 * dataset is mounted (label cannot be changed while mounted).
557 * Get the zfsvfs_t; if there isn't one, then the dataset isn't
558 * mounted (or isn't a dataset, doesn't exist, ...).
559 */
560 if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) {
561 objset_t *os;
562 static const char *setsl_tag = "setsl_tag";
563
564 /*
565 * Try to own the dataset; abort if there is any error,
566 * (e.g., already mounted, in use, or other error).
567 */
568 error = dmu_objset_own(name, DMU_OST_ZFS, B_TRUE, B_TRUE,
569 setsl_tag, &os);
570 if (error != 0)
571 return (SET_ERROR(EPERM));
572
573 dmu_objset_disown(os, B_TRUE, setsl_tag);
574
575 if (new_default) {
576 needed_priv = PRIV_FILE_DOWNGRADE_SL;
577 goto out_check;
578 }
579
580 if (hexstr_to_label(strval, &new_sl) != 0)
581 return (SET_ERROR(EPERM));
582
583 if (blstrictdom(&ds_sl, &new_sl))
584 needed_priv = PRIV_FILE_DOWNGRADE_SL;
585 else if (blstrictdom(&new_sl, &ds_sl))
586 needed_priv = PRIV_FILE_UPGRADE_SL;
587 } else {
588 /* dataset currently has a default label */
589 if (!new_default)
590 needed_priv = PRIV_FILE_UPGRADE_SL;
591 }
592
593 out_check:
594 if (needed_priv != -1)
595 return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL));
596 return (0);
597 #else
598 return (SET_ERROR(ENOTSUP));
599 #endif /* HAVE_MLSLABEL */
600 }
601
602 static int
zfs_secpolicy_setprop(const char * dsname,zfs_prop_t prop,nvpair_t * propval,cred_t * cr)603 zfs_secpolicy_setprop(const char *dsname, zfs_prop_t prop, nvpair_t *propval,
604 cred_t *cr)
605 {
606 const char *strval;
607
608 /*
609 * Check permissions for special properties.
610 */
611 switch (prop) {
612 default:
613 break;
614 case ZFS_PROP_ZONED:
615 /*
616 * Disallow setting of 'zoned' from within a local zone.
617 */
618 if (!INGLOBALZONE(curproc))
619 return (SET_ERROR(EPERM));
620 break;
621
622 case ZFS_PROP_QUOTA:
623 case ZFS_PROP_FILESYSTEM_LIMIT:
624 case ZFS_PROP_SNAPSHOT_LIMIT:
625 if (!INGLOBALZONE(curproc)) {
626 uint64_t zoned;
627 char setpoint[ZFS_MAX_DATASET_NAME_LEN];
628 /*
629 * Unprivileged users are allowed to modify the
630 * limit on things *under* (ie. contained by)
631 * the thing they own.
632 */
633 if (dsl_prop_get_integer(dsname,
634 zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, setpoint))
635 return (SET_ERROR(EPERM));
636 if (!zoned || strlen(dsname) <= strlen(setpoint))
637 return (SET_ERROR(EPERM));
638 }
639 break;
640
641 case ZFS_PROP_MLSLABEL:
642 if (!is_system_labeled())
643 return (SET_ERROR(EPERM));
644
645 if (nvpair_value_string(propval, &strval) == 0) {
646 int err;
647
648 err = zfs_set_slabel_policy(dsname, strval, CRED());
649 if (err != 0)
650 return (err);
651 }
652 break;
653 }
654
655 return (zfs_secpolicy_write_perms(dsname, zfs_prop_to_name(prop), cr));
656 }
657
658 static int
zfs_secpolicy_set_fsacl(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)659 zfs_secpolicy_set_fsacl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
660 {
661 /*
662 * permission to set permissions will be evaluated later in
663 * dsl_deleg_can_allow()
664 */
665 (void) innvl;
666 return (zfs_dozonecheck(zc->zc_name, cr));
667 }
668
669 static int
zfs_secpolicy_rollback(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)670 zfs_secpolicy_rollback(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
671 {
672 (void) innvl;
673 return (zfs_secpolicy_write_perms(zc->zc_name,
674 ZFS_DELEG_PERM_ROLLBACK, cr));
675 }
676
677 static int
zfs_secpolicy_send(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)678 zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
679 {
680 (void) innvl;
681 dsl_pool_t *dp;
682 dsl_dataset_t *ds;
683 const char *cp;
684 int error;
685
686 /*
687 * Generate the current snapshot name from the given objsetid, then
688 * use that name for the secpolicy/zone checks.
689 */
690 cp = strchr(zc->zc_name, '@');
691 if (cp == NULL)
692 return (SET_ERROR(EINVAL));
693 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
694 if (error != 0)
695 return (error);
696
697 error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds);
698 if (error != 0) {
699 dsl_pool_rele(dp, FTAG);
700 return (error);
701 }
702
703 dsl_dataset_name(ds, zc->zc_name);
704
705 error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
706 ZFS_DELEG_PERM_SEND, cr);
707 dsl_dataset_rele(ds, FTAG);
708 dsl_pool_rele(dp, FTAG);
709
710 return (error);
711 }
712
713 static int
zfs_secpolicy_send_new(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)714 zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
715 {
716 (void) innvl;
717 return (zfs_secpolicy_write_perms(zc->zc_name,
718 ZFS_DELEG_PERM_SEND, cr));
719 }
720
721 static int
zfs_secpolicy_share(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)722 zfs_secpolicy_share(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
723 {
724 (void) zc, (void) innvl, (void) cr;
725 return (SET_ERROR(ENOTSUP));
726 }
727
728 static int
zfs_secpolicy_smb_acl(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)729 zfs_secpolicy_smb_acl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
730 {
731 (void) zc, (void) innvl, (void) cr;
732 return (SET_ERROR(ENOTSUP));
733 }
734
735 static int
zfs_get_parent(const char * datasetname,char * parent,int parentsize)736 zfs_get_parent(const char *datasetname, char *parent, int parentsize)
737 {
738 char *cp;
739
740 /*
741 * Remove the @bla or /bla from the end of the name to get the parent.
742 */
743 (void) strlcpy(parent, datasetname, parentsize);
744 cp = strrchr(parent, '@');
745 if (cp != NULL) {
746 cp[0] = '\0';
747 } else {
748 cp = strrchr(parent, '/');
749 if (cp == NULL)
750 return (SET_ERROR(ENOENT));
751 cp[0] = '\0';
752 }
753
754 return (0);
755 }
756
757 int
zfs_secpolicy_destroy_perms(const char * name,cred_t * cr)758 zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
759 {
760 int error;
761
762 if ((error = zfs_secpolicy_write_perms(name,
763 ZFS_DELEG_PERM_MOUNT, cr)) != 0)
764 return (error);
765
766 return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr));
767 }
768
769 static int
zfs_secpolicy_destroy(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)770 zfs_secpolicy_destroy(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
771 {
772 (void) innvl;
773 return (zfs_secpolicy_destroy_perms(zc->zc_name, cr));
774 }
775
776 /*
777 * Destroying snapshots with delegated permissions requires
778 * descendant mount and destroy permissions.
779 */
780 static int
zfs_secpolicy_destroy_snaps(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)781 zfs_secpolicy_destroy_snaps(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
782 {
783 (void) zc;
784 nvlist_t *snaps;
785 nvpair_t *pair, *nextpair;
786 int error = 0;
787
788 snaps = fnvlist_lookup_nvlist(innvl, "snaps");
789
790 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
791 pair = nextpair) {
792 nextpair = nvlist_next_nvpair(snaps, pair);
793 error = zfs_secpolicy_destroy_perms(nvpair_name(pair), cr);
794 if (error == ENOENT) {
795 /*
796 * Ignore any snapshots that don't exist (we consider
797 * them "already destroyed"). Remove the name from the
798 * nvl here in case the snapshot is created between
799 * now and when we try to destroy it (in which case
800 * we don't want to destroy it since we haven't
801 * checked for permission).
802 */
803 fnvlist_remove_nvpair(snaps, pair);
804 error = 0;
805 }
806 if (error != 0)
807 break;
808 }
809
810 return (error);
811 }
812
813 int
zfs_secpolicy_rename_perms(const char * from,const char * to,cred_t * cr)814 zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
815 {
816 char parentname[ZFS_MAX_DATASET_NAME_LEN];
817 int error;
818
819 if ((error = zfs_secpolicy_write_perms(from,
820 ZFS_DELEG_PERM_RENAME, cr)) != 0)
821 return (error);
822
823 if ((error = zfs_secpolicy_write_perms(from,
824 ZFS_DELEG_PERM_MOUNT, cr)) != 0)
825 return (error);
826
827 if ((error = zfs_get_parent(to, parentname,
828 sizeof (parentname))) != 0)
829 return (error);
830
831 if ((error = zfs_secpolicy_write_perms(parentname,
832 ZFS_DELEG_PERM_CREATE, cr)) != 0)
833 return (error);
834
835 if ((error = zfs_secpolicy_write_perms(parentname,
836 ZFS_DELEG_PERM_MOUNT, cr)) != 0)
837 return (error);
838
839 return (error);
840 }
841
842 static int
zfs_secpolicy_rename(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)843 zfs_secpolicy_rename(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
844 {
845 (void) innvl;
846 return (zfs_secpolicy_rename_perms(zc->zc_name, zc->zc_value, cr));
847 }
848
849 static int
zfs_secpolicy_promote(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)850 zfs_secpolicy_promote(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
851 {
852 (void) innvl;
853 dsl_pool_t *dp;
854 dsl_dataset_t *clone;
855 int error;
856
857 error = zfs_secpolicy_write_perms(zc->zc_name,
858 ZFS_DELEG_PERM_PROMOTE, cr);
859 if (error != 0)
860 return (error);
861
862 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
863 if (error != 0)
864 return (error);
865
866 error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &clone);
867
868 if (error == 0) {
869 char parentname[ZFS_MAX_DATASET_NAME_LEN];
870 dsl_dataset_t *origin = NULL;
871 dsl_dir_t *dd;
872 dd = clone->ds_dir;
873
874 error = dsl_dataset_hold_obj(dd->dd_pool,
875 dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin);
876 if (error != 0) {
877 dsl_dataset_rele(clone, FTAG);
878 dsl_pool_rele(dp, FTAG);
879 return (error);
880 }
881
882 error = zfs_secpolicy_write_perms_ds(zc->zc_name, clone,
883 ZFS_DELEG_PERM_MOUNT, cr);
884
885 dsl_dataset_name(origin, parentname);
886 if (error == 0) {
887 error = zfs_secpolicy_write_perms_ds(parentname, origin,
888 ZFS_DELEG_PERM_PROMOTE, cr);
889 }
890 dsl_dataset_rele(clone, FTAG);
891 dsl_dataset_rele(origin, FTAG);
892 }
893 dsl_pool_rele(dp, FTAG);
894 return (error);
895 }
896
897 static int
zfs_secpolicy_recv(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)898 zfs_secpolicy_recv(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
899 {
900 (void) innvl;
901 int error;
902
903 if ((error = zfs_secpolicy_write_perms(zc->zc_name,
904 ZFS_DELEG_PERM_RECEIVE, cr)) != 0)
905 return (error);
906
907 if ((error = zfs_secpolicy_write_perms(zc->zc_name,
908 ZFS_DELEG_PERM_MOUNT, cr)) != 0)
909 return (error);
910
911 return (zfs_secpolicy_write_perms(zc->zc_name,
912 ZFS_DELEG_PERM_CREATE, cr));
913 }
914
915 int
zfs_secpolicy_snapshot_perms(const char * name,cred_t * cr)916 zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
917 {
918 return (zfs_secpolicy_write_perms(name,
919 ZFS_DELEG_PERM_SNAPSHOT, cr));
920 }
921
922 /*
923 * Check for permission to create each snapshot in the nvlist.
924 */
925 static int
zfs_secpolicy_snapshot(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)926 zfs_secpolicy_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
927 {
928 (void) zc;
929 nvlist_t *snaps;
930 int error = 0;
931 nvpair_t *pair;
932
933 snaps = fnvlist_lookup_nvlist(innvl, "snaps");
934
935 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
936 pair = nvlist_next_nvpair(snaps, pair)) {
937 char *name = (char *)nvpair_name(pair);
938 char *atp = strchr(name, '@');
939
940 if (atp == NULL) {
941 error = SET_ERROR(EINVAL);
942 break;
943 }
944 *atp = '\0';
945 error = zfs_secpolicy_snapshot_perms(name, cr);
946 *atp = '@';
947 if (error != 0)
948 break;
949 }
950 return (error);
951 }
952
953 /*
954 * Check for permission to create each bookmark in the nvlist.
955 */
956 static int
zfs_secpolicy_bookmark(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)957 zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
958 {
959 (void) zc;
960 int error = 0;
961
962 for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
963 pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
964 char *name = (char *)nvpair_name(pair);
965 char *hashp = strchr(name, '#');
966
967 if (hashp == NULL) {
968 error = SET_ERROR(EINVAL);
969 break;
970 }
971 *hashp = '\0';
972 error = zfs_secpolicy_write_perms(name,
973 ZFS_DELEG_PERM_BOOKMARK, cr);
974 *hashp = '#';
975 if (error != 0)
976 break;
977 }
978 return (error);
979 }
980
981 static int
zfs_secpolicy_destroy_bookmarks(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)982 zfs_secpolicy_destroy_bookmarks(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
983 {
984 (void) zc;
985 nvpair_t *pair, *nextpair;
986 int error = 0;
987
988 for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL;
989 pair = nextpair) {
990 char *name = (char *)nvpair_name(pair);
991 char *hashp = strchr(name, '#');
992 nextpair = nvlist_next_nvpair(innvl, pair);
993
994 if (hashp == NULL) {
995 error = SET_ERROR(EINVAL);
996 break;
997 }
998
999 *hashp = '\0';
1000 error = zfs_secpolicy_write_perms(name,
1001 ZFS_DELEG_PERM_DESTROY, cr);
1002 *hashp = '#';
1003 if (error == ENOENT) {
1004 /*
1005 * Ignore any filesystems that don't exist (we consider
1006 * their bookmarks "already destroyed"). Remove
1007 * the name from the nvl here in case the filesystem
1008 * is created between now and when we try to destroy
1009 * the bookmark (in which case we don't want to
1010 * destroy it since we haven't checked for permission).
1011 */
1012 fnvlist_remove_nvpair(innvl, pair);
1013 error = 0;
1014 }
1015 if (error != 0)
1016 break;
1017 }
1018
1019 return (error);
1020 }
1021
1022 static int
zfs_secpolicy_log_history(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1023 zfs_secpolicy_log_history(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1024 {
1025 (void) zc, (void) innvl, (void) cr;
1026 /*
1027 * Even root must have a proper TSD so that we know what pool
1028 * to log to.
1029 */
1030 if (tsd_get(zfs_allow_log_key) == NULL)
1031 return (SET_ERROR(EPERM));
1032 return (0);
1033 }
1034
1035 static int
zfs_secpolicy_create_clone(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1036 zfs_secpolicy_create_clone(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1037 {
1038 char parentname[ZFS_MAX_DATASET_NAME_LEN];
1039 int error;
1040 const char *origin;
1041
1042 if ((error = zfs_get_parent(zc->zc_name, parentname,
1043 sizeof (parentname))) != 0)
1044 return (error);
1045
1046 if (nvlist_lookup_string(innvl, "origin", &origin) == 0 &&
1047 (error = zfs_secpolicy_write_perms(origin,
1048 ZFS_DELEG_PERM_CLONE, cr)) != 0)
1049 return (error);
1050
1051 if ((error = zfs_secpolicy_write_perms(parentname,
1052 ZFS_DELEG_PERM_CREATE, cr)) != 0)
1053 return (error);
1054
1055 return (zfs_secpolicy_write_perms(parentname,
1056 ZFS_DELEG_PERM_MOUNT, cr));
1057 }
1058
1059 /*
1060 * Policy for pool operations - create/destroy pools, add vdevs, etc. Requires
1061 * SYS_CONFIG privilege, which is not available in a local zone.
1062 */
1063 int
zfs_secpolicy_config(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1064 zfs_secpolicy_config(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1065 {
1066 (void) zc, (void) innvl;
1067
1068 if (secpolicy_sys_config(cr, B_FALSE) != 0)
1069 return (SET_ERROR(EPERM));
1070
1071 return (0);
1072 }
1073
1074 /*
1075 * Policy for object to name lookups.
1076 */
1077 static int
zfs_secpolicy_diff(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1078 zfs_secpolicy_diff(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1079 {
1080 (void) innvl;
1081 int error;
1082
1083 if (secpolicy_sys_config(cr, B_FALSE) == 0)
1084 return (0);
1085
1086 error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr);
1087 return (error);
1088 }
1089
1090 /*
1091 * Policy for fault injection. Requires all privileges.
1092 */
1093 static int
zfs_secpolicy_inject(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1094 zfs_secpolicy_inject(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1095 {
1096 (void) zc, (void) innvl;
1097 return (secpolicy_zinject(cr));
1098 }
1099
1100 static int
zfs_secpolicy_inherit_prop(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1101 zfs_secpolicy_inherit_prop(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1102 {
1103 (void) innvl;
1104 zfs_prop_t prop = zfs_name_to_prop(zc->zc_value);
1105
1106 if (prop == ZPROP_USERPROP) {
1107 if (!zfs_prop_user(zc->zc_value))
1108 return (SET_ERROR(EINVAL));
1109 return (zfs_secpolicy_write_perms(zc->zc_name,
1110 ZFS_DELEG_PERM_USERPROP, cr));
1111 } else {
1112 return (zfs_secpolicy_setprop(zc->zc_name, prop,
1113 NULL, cr));
1114 }
1115 }
1116
1117 static int
zfs_secpolicy_userspace_one(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1118 zfs_secpolicy_userspace_one(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1119 {
1120 int err = zfs_secpolicy_read(zc, innvl, cr);
1121 if (err)
1122 return (err);
1123
1124 if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
1125 return (SET_ERROR(EINVAL));
1126
1127 if (zc->zc_value[0] == 0) {
1128 /*
1129 * They are asking about a posix uid/gid. If it's
1130 * themself, allow it.
1131 */
1132 if (zc->zc_objset_type == ZFS_PROP_USERUSED ||
1133 zc->zc_objset_type == ZFS_PROP_USERQUOTA ||
1134 zc->zc_objset_type == ZFS_PROP_USEROBJUSED ||
1135 zc->zc_objset_type == ZFS_PROP_USEROBJQUOTA) {
1136 if (zc->zc_guid == crgetuid(cr))
1137 return (0);
1138 } else if (zc->zc_objset_type == ZFS_PROP_GROUPUSED ||
1139 zc->zc_objset_type == ZFS_PROP_GROUPQUOTA ||
1140 zc->zc_objset_type == ZFS_PROP_GROUPOBJUSED ||
1141 zc->zc_objset_type == ZFS_PROP_GROUPOBJQUOTA) {
1142 if (groupmember(zc->zc_guid, cr))
1143 return (0);
1144 }
1145 /* else is for project quota/used */
1146 }
1147
1148 return (zfs_secpolicy_write_perms(zc->zc_name,
1149 userquota_perms[zc->zc_objset_type], cr));
1150 }
1151
1152 static int
zfs_secpolicy_userspace_many(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1153 zfs_secpolicy_userspace_many(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1154 {
1155 int err = zfs_secpolicy_read(zc, innvl, cr);
1156 if (err)
1157 return (err);
1158
1159 if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
1160 return (SET_ERROR(EINVAL));
1161
1162 return (zfs_secpolicy_write_perms(zc->zc_name,
1163 userquota_perms[zc->zc_objset_type], cr));
1164 }
1165
1166 static int
zfs_secpolicy_userspace_upgrade(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1167 zfs_secpolicy_userspace_upgrade(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1168 {
1169 (void) innvl;
1170 return (zfs_secpolicy_setprop(zc->zc_name, ZFS_PROP_VERSION,
1171 NULL, cr));
1172 }
1173
1174 static int
zfs_secpolicy_hold(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1175 zfs_secpolicy_hold(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1176 {
1177 (void) zc;
1178 nvpair_t *pair;
1179 nvlist_t *holds;
1180 int error;
1181
1182 holds = fnvlist_lookup_nvlist(innvl, "holds");
1183
1184 for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
1185 pair = nvlist_next_nvpair(holds, pair)) {
1186 char fsname[ZFS_MAX_DATASET_NAME_LEN];
1187 error = dmu_fsname(nvpair_name(pair), fsname);
1188 if (error != 0)
1189 return (error);
1190 error = zfs_secpolicy_write_perms(fsname,
1191 ZFS_DELEG_PERM_HOLD, cr);
1192 if (error != 0)
1193 return (error);
1194 }
1195 return (0);
1196 }
1197
1198 static int
zfs_secpolicy_release(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1199 zfs_secpolicy_release(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1200 {
1201 (void) zc;
1202 nvpair_t *pair;
1203 int error;
1204
1205 for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL;
1206 pair = nvlist_next_nvpair(innvl, pair)) {
1207 char fsname[ZFS_MAX_DATASET_NAME_LEN];
1208 error = dmu_fsname(nvpair_name(pair), fsname);
1209 if (error != 0)
1210 return (error);
1211 error = zfs_secpolicy_write_perms(fsname,
1212 ZFS_DELEG_PERM_RELEASE, cr);
1213 if (error != 0)
1214 return (error);
1215 }
1216 return (0);
1217 }
1218
1219 /*
1220 * Policy for allowing temporary snapshots to be taken or released
1221 */
1222 static int
zfs_secpolicy_tmp_snapshot(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1223 zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1224 {
1225 /*
1226 * A temporary snapshot is the same as a snapshot,
1227 * hold, destroy and release all rolled into one.
1228 * Delegated diff alone is sufficient that we allow this.
1229 */
1230 int error;
1231
1232 if (zfs_secpolicy_write_perms(zc->zc_name,
1233 ZFS_DELEG_PERM_DIFF, cr) == 0)
1234 return (0);
1235
1236 error = zfs_secpolicy_snapshot_perms(zc->zc_name, cr);
1237
1238 if (innvl != NULL) {
1239 if (error == 0)
1240 error = zfs_secpolicy_hold(zc, innvl, cr);
1241 if (error == 0)
1242 error = zfs_secpolicy_release(zc, innvl, cr);
1243 if (error == 0)
1244 error = zfs_secpolicy_destroy(zc, innvl, cr);
1245 }
1246 return (error);
1247 }
1248
1249 static int
zfs_secpolicy_load_key(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1250 zfs_secpolicy_load_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1251 {
1252 return (zfs_secpolicy_write_perms(zc->zc_name,
1253 ZFS_DELEG_PERM_LOAD_KEY, cr));
1254 }
1255
1256 static int
zfs_secpolicy_change_key(zfs_cmd_t * zc,nvlist_t * innvl,cred_t * cr)1257 zfs_secpolicy_change_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
1258 {
1259 return (zfs_secpolicy_write_perms(zc->zc_name,
1260 ZFS_DELEG_PERM_CHANGE_KEY, cr));
1261 }
1262
1263 /*
1264 * Returns the nvlist as specified by the user in the zfs_cmd_t.
1265 */
1266 static int
get_nvlist(uint64_t nvl,uint64_t size,int iflag,nvlist_t ** nvp)1267 get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp)
1268 {
1269 char *packed;
1270 int error;
1271 nvlist_t *list = NULL;
1272
1273 /*
1274 * Read in and unpack the user-supplied nvlist.
1275 */
1276 if (size == 0)
1277 return (SET_ERROR(EINVAL));
1278
1279 packed = vmem_alloc(size, KM_SLEEP);
1280
1281 if (ddi_copyin((void *)(uintptr_t)nvl, packed, size, iflag) != 0) {
1282 vmem_free(packed, size);
1283 return (SET_ERROR(EFAULT));
1284 }
1285
1286 if ((error = nvlist_unpack(packed, size, &list, 0)) != 0) {
1287 vmem_free(packed, size);
1288 return (error);
1289 }
1290
1291 vmem_free(packed, size);
1292
1293 *nvp = list;
1294 return (0);
1295 }
1296
1297 /*
1298 * Reduce the size of this nvlist until it can be serialized in 'max' bytes.
1299 * Entries will be removed from the end of the nvlist, and one int32 entry
1300 * named "N_MORE_ERRORS" will be added indicating how many entries were
1301 * removed.
1302 */
1303 static int
nvlist_smush(nvlist_t * errors,size_t max)1304 nvlist_smush(nvlist_t *errors, size_t max)
1305 {
1306 size_t size;
1307
1308 size = fnvlist_size(errors);
1309
1310 if (size > max) {
1311 nvpair_t *more_errors;
1312 int n = 0;
1313
1314 if (max < 1024)
1315 return (SET_ERROR(ENOMEM));
1316
1317 fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, 0);
1318 more_errors = nvlist_prev_nvpair(errors, NULL);
1319
1320 do {
1321 nvpair_t *pair = nvlist_prev_nvpair(errors,
1322 more_errors);
1323 fnvlist_remove_nvpair(errors, pair);
1324 n++;
1325 size = fnvlist_size(errors);
1326 } while (size > max);
1327
1328 fnvlist_remove_nvpair(errors, more_errors);
1329 fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, n);
1330 ASSERT3U(fnvlist_size(errors), <=, max);
1331 }
1332
1333 return (0);
1334 }
1335
1336 static int
put_nvlist(zfs_cmd_t * zc,nvlist_t * nvl)1337 put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl)
1338 {
1339 char *packed = NULL;
1340 int error = 0;
1341 size_t size;
1342
1343 size = fnvlist_size(nvl);
1344
1345 if (size > zc->zc_nvlist_dst_size) {
1346 error = SET_ERROR(ENOMEM);
1347 } else {
1348 packed = fnvlist_pack(nvl, &size);
1349 if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst,
1350 size, zc->zc_iflags) != 0)
1351 error = SET_ERROR(EFAULT);
1352 fnvlist_pack_free(packed, size);
1353 }
1354
1355 zc->zc_nvlist_dst_size = size;
1356 zc->zc_nvlist_dst_filled = B_TRUE;
1357 return (error);
1358 }
1359
1360 int
getzfsvfs_impl(objset_t * os,zfsvfs_t ** zfvp)1361 getzfsvfs_impl(objset_t *os, zfsvfs_t **zfvp)
1362 {
1363 int error = 0;
1364 if (dmu_objset_type(os) != DMU_OST_ZFS) {
1365 return (SET_ERROR(EINVAL));
1366 }
1367
1368 mutex_enter(&os->os_user_ptr_lock);
1369 *zfvp = dmu_objset_get_user(os);
1370 /* bump s_active only when non-zero to prevent umount race */
1371 error = zfs_vfs_ref(zfvp);
1372 mutex_exit(&os->os_user_ptr_lock);
1373 return (error);
1374 }
1375
1376 int
getzfsvfs(const char * dsname,zfsvfs_t ** zfvp)1377 getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
1378 {
1379 objset_t *os;
1380 int error;
1381
1382 error = dmu_objset_hold(dsname, FTAG, &os);
1383 if (error != 0)
1384 return (error);
1385
1386 error = getzfsvfs_impl(os, zfvp);
1387 dmu_objset_rele(os, FTAG);
1388 return (error);
1389 }
1390
1391 /*
1392 * Find a zfsvfs_t for a mounted filesystem, or create our own, in which
1393 * case its z_sb will be NULL, and it will be opened as the owner.
1394 * If 'writer' is set, the z_teardown_lock will be held for RW_WRITER,
1395 * which prevents all inode ops from running.
1396 */
1397 static int
zfsvfs_hold(const char * name,const void * tag,zfsvfs_t ** zfvp,boolean_t writer)1398 zfsvfs_hold(const char *name, const void *tag, zfsvfs_t **zfvp,
1399 boolean_t writer)
1400 {
1401 int error = 0;
1402
1403 if (getzfsvfs(name, zfvp) != 0)
1404 error = zfsvfs_create(name, B_FALSE, zfvp);
1405 if (error == 0) {
1406 if (writer)
1407 ZFS_TEARDOWN_ENTER_WRITE(*zfvp, tag);
1408 else
1409 ZFS_TEARDOWN_ENTER_READ(*zfvp, tag);
1410 if ((*zfvp)->z_unmounted) {
1411 /*
1412 * XXX we could probably try again, since the unmounting
1413 * thread should be just about to disassociate the
1414 * objset from the zfsvfs.
1415 */
1416 ZFS_TEARDOWN_EXIT(*zfvp, tag);
1417 return (SET_ERROR(EBUSY));
1418 }
1419 }
1420 return (error);
1421 }
1422
1423 static void
zfsvfs_rele(zfsvfs_t * zfsvfs,const void * tag)1424 zfsvfs_rele(zfsvfs_t *zfsvfs, const void *tag)
1425 {
1426 ZFS_TEARDOWN_EXIT(zfsvfs, tag);
1427
1428 if (zfs_vfs_held(zfsvfs)) {
1429 zfs_vfs_rele(zfsvfs);
1430 } else {
1431 dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
1432 zfsvfs_free(zfsvfs);
1433 }
1434 }
1435
1436 static int
zfs_ioc_pool_create(zfs_cmd_t * zc)1437 zfs_ioc_pool_create(zfs_cmd_t *zc)
1438 {
1439 int error;
1440 nvlist_t *config, *props = NULL;
1441 nvlist_t *rootprops = NULL;
1442 nvlist_t *zplprops = NULL;
1443 dsl_crypto_params_t *dcp = NULL;
1444 const char *spa_name = zc->zc_name;
1445 boolean_t unload_wkey = B_TRUE;
1446
1447 if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
1448 zc->zc_iflags, &config)))
1449 return (error);
1450
1451 if (zc->zc_nvlist_src_size != 0 && (error =
1452 get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
1453 zc->zc_iflags, &props))) {
1454 nvlist_free(config);
1455 return (error);
1456 }
1457
1458 if (props) {
1459 nvlist_t *nvl = NULL;
1460 nvlist_t *hidden_args = NULL;
1461 uint64_t version = SPA_VERSION;
1462 const char *tname;
1463
1464 (void) nvlist_lookup_uint64(props,
1465 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version);
1466 if (!SPA_VERSION_IS_SUPPORTED(version)) {
1467 error = SET_ERROR(EINVAL);
1468 goto pool_props_bad;
1469 }
1470 (void) nvlist_lookup_nvlist(props, ZPOOL_ROOTFS_PROPS, &nvl);
1471 if (nvl) {
1472 error = nvlist_dup(nvl, &rootprops, KM_SLEEP);
1473 if (error != 0)
1474 goto pool_props_bad;
1475 (void) nvlist_remove_all(props, ZPOOL_ROOTFS_PROPS);
1476 }
1477
1478 (void) nvlist_lookup_nvlist(props, ZPOOL_HIDDEN_ARGS,
1479 &hidden_args);
1480 error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
1481 rootprops, hidden_args, &dcp);
1482 if (error != 0)
1483 goto pool_props_bad;
1484 (void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS);
1485
1486 VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1487 error = zfs_fill_zplprops_root(version, rootprops,
1488 zplprops, NULL);
1489 if (error != 0)
1490 goto pool_props_bad;
1491
1492 if (nvlist_lookup_string(props,
1493 zpool_prop_to_name(ZPOOL_PROP_TNAME), &tname) == 0)
1494 spa_name = tname;
1495 }
1496
1497 error = spa_create(zc->zc_name, config, props, zplprops, dcp);
1498
1499 /*
1500 * Set the remaining root properties
1501 */
1502 if (!error && (error = zfs_set_prop_nvlist(spa_name,
1503 ZPROP_SRC_LOCAL, rootprops, NULL)) != 0) {
1504 (void) spa_destroy(spa_name);
1505 unload_wkey = B_FALSE; /* spa_destroy() unloads wrapping keys */
1506 }
1507
1508 pool_props_bad:
1509 nvlist_free(rootprops);
1510 nvlist_free(zplprops);
1511 nvlist_free(config);
1512 nvlist_free(props);
1513 dsl_crypto_params_free(dcp, unload_wkey && !!error);
1514
1515 return (error);
1516 }
1517
1518 static int
zfs_ioc_pool_destroy(zfs_cmd_t * zc)1519 zfs_ioc_pool_destroy(zfs_cmd_t *zc)
1520 {
1521 int error;
1522 zfs_log_history(zc);
1523 error = spa_destroy(zc->zc_name);
1524
1525 return (error);
1526 }
1527
1528 static int
zfs_ioc_pool_import(zfs_cmd_t * zc)1529 zfs_ioc_pool_import(zfs_cmd_t *zc)
1530 {
1531 nvlist_t *config, *props = NULL;
1532 uint64_t guid;
1533 int error;
1534
1535 if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
1536 zc->zc_iflags, &config)) != 0)
1537 return (error);
1538
1539 if (zc->zc_nvlist_src_size != 0 && (error =
1540 get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
1541 zc->zc_iflags, &props))) {
1542 nvlist_free(config);
1543 return (error);
1544 }
1545
1546 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
1547 guid != zc->zc_guid)
1548 error = SET_ERROR(EINVAL);
1549 else
1550 error = spa_import(zc->zc_name, config, props, zc->zc_cookie);
1551
1552 if (zc->zc_nvlist_dst != 0) {
1553 int err;
1554
1555 if ((err = put_nvlist(zc, config)) != 0)
1556 error = err;
1557 }
1558
1559 nvlist_free(config);
1560 nvlist_free(props);
1561
1562 return (error);
1563 }
1564
1565 static int
zfs_ioc_pool_export(zfs_cmd_t * zc)1566 zfs_ioc_pool_export(zfs_cmd_t *zc)
1567 {
1568 int error;
1569 boolean_t force = (boolean_t)zc->zc_cookie;
1570 boolean_t hardforce = (boolean_t)zc->zc_guid;
1571
1572 zfs_log_history(zc);
1573 error = spa_export(zc->zc_name, NULL, force, hardforce);
1574
1575 return (error);
1576 }
1577
1578 static int
zfs_ioc_pool_configs(zfs_cmd_t * zc)1579 zfs_ioc_pool_configs(zfs_cmd_t *zc)
1580 {
1581 nvlist_t *configs;
1582 int error;
1583
1584 error = spa_all_configs(&zc->zc_cookie, &configs);
1585 if (error)
1586 return (error);
1587
1588 error = put_nvlist(zc, configs);
1589
1590 nvlist_free(configs);
1591
1592 return (error);
1593 }
1594
1595 /*
1596 * inputs:
1597 * zc_name name of the pool
1598 *
1599 * outputs:
1600 * zc_cookie real errno
1601 * zc_nvlist_dst config nvlist
1602 * zc_nvlist_dst_size size of config nvlist
1603 */
1604 static int
zfs_ioc_pool_stats(zfs_cmd_t * zc)1605 zfs_ioc_pool_stats(zfs_cmd_t *zc)
1606 {
1607 nvlist_t *config;
1608 int error;
1609 int ret = 0;
1610
1611 error = spa_get_stats(zc->zc_name, &config, zc->zc_value,
1612 sizeof (zc->zc_value));
1613
1614 if (config != NULL) {
1615 ret = put_nvlist(zc, config);
1616 nvlist_free(config);
1617
1618 /*
1619 * The config may be present even if 'error' is non-zero.
1620 * In this case we return success, and preserve the real errno
1621 * in 'zc_cookie'.
1622 */
1623 zc->zc_cookie = error;
1624 } else {
1625 ret = error;
1626 }
1627
1628 return (ret);
1629 }
1630
1631 /*
1632 * Try to import the given pool, returning pool stats as appropriate so that
1633 * user land knows which devices are available and overall pool health.
1634 */
1635 static int
zfs_ioc_pool_tryimport(zfs_cmd_t * zc)1636 zfs_ioc_pool_tryimport(zfs_cmd_t *zc)
1637 {
1638 nvlist_t *tryconfig, *config = NULL;
1639 int error;
1640
1641 if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
1642 zc->zc_iflags, &tryconfig)) != 0)
1643 return (error);
1644
1645 config = spa_tryimport(tryconfig);
1646
1647 nvlist_free(tryconfig);
1648
1649 if (config == NULL)
1650 return (SET_ERROR(EINVAL));
1651
1652 error = put_nvlist(zc, config);
1653 nvlist_free(config);
1654
1655 return (error);
1656 }
1657
1658 /*
1659 * inputs:
1660 * zc_name name of the pool
1661 * zc_cookie scan func (pool_scan_func_t)
1662 * zc_flags scrub pause/resume flag (pool_scrub_cmd_t)
1663 */
1664 static int
zfs_ioc_pool_scan(zfs_cmd_t * zc)1665 zfs_ioc_pool_scan(zfs_cmd_t *zc)
1666 {
1667 spa_t *spa;
1668 int error;
1669
1670 if (zc->zc_flags >= POOL_SCRUB_FLAGS_END)
1671 return (SET_ERROR(EINVAL));
1672
1673 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
1674 return (error);
1675
1676 if (zc->zc_flags == POOL_SCRUB_PAUSE)
1677 error = spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE);
1678 else if (zc->zc_cookie == POOL_SCAN_NONE)
1679 error = spa_scan_stop(spa);
1680 else
1681 error = spa_scan(spa, zc->zc_cookie);
1682
1683 spa_close(spa, FTAG);
1684
1685 return (error);
1686 }
1687
1688 /*
1689 * inputs:
1690 * poolname name of the pool
1691 * scan_type scan func (pool_scan_func_t)
1692 * scan_command scrub pause/resume flag (pool_scrub_cmd_t)
1693 */
1694 static const zfs_ioc_key_t zfs_keys_pool_scrub[] = {
1695 {"scan_type", DATA_TYPE_UINT64, 0},
1696 {"scan_command", DATA_TYPE_UINT64, 0},
1697 };
1698
1699 static int
zfs_ioc_pool_scrub(const char * poolname,nvlist_t * innvl,nvlist_t * outnvl)1700 zfs_ioc_pool_scrub(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
1701 {
1702 spa_t *spa;
1703 int error;
1704 uint64_t scan_type, scan_cmd;
1705
1706 if (nvlist_lookup_uint64(innvl, "scan_type", &scan_type) != 0)
1707 return (SET_ERROR(EINVAL));
1708 if (nvlist_lookup_uint64(innvl, "scan_command", &scan_cmd) != 0)
1709 return (SET_ERROR(EINVAL));
1710
1711 if (scan_cmd >= POOL_SCRUB_FLAGS_END)
1712 return (SET_ERROR(EINVAL));
1713
1714 if ((error = spa_open(poolname, &spa, FTAG)) != 0)
1715 return (error);
1716
1717 if (scan_cmd == POOL_SCRUB_PAUSE) {
1718 error = spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE);
1719 } else if (scan_type == POOL_SCAN_NONE) {
1720 error = spa_scan_stop(spa);
1721 } else if (scan_cmd == POOL_SCRUB_FROM_LAST_TXG) {
1722 error = spa_scan_range(spa, scan_type,
1723 spa_get_last_scrubbed_txg(spa), 0);
1724 } else {
1725 error = spa_scan(spa, scan_type);
1726 }
1727
1728 spa_close(spa, FTAG);
1729 return (error);
1730 }
1731
1732 static int
zfs_ioc_pool_freeze(zfs_cmd_t * zc)1733 zfs_ioc_pool_freeze(zfs_cmd_t *zc)
1734 {
1735 spa_t *spa;
1736 int error;
1737
1738 error = spa_open(zc->zc_name, &spa, FTAG);
1739 if (error == 0) {
1740 spa_freeze(spa);
1741 spa_close(spa, FTAG);
1742 }
1743 return (error);
1744 }
1745
1746 static int
zfs_ioc_pool_upgrade(zfs_cmd_t * zc)1747 zfs_ioc_pool_upgrade(zfs_cmd_t *zc)
1748 {
1749 spa_t *spa;
1750 int error;
1751
1752 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
1753 return (error);
1754
1755 if (zc->zc_cookie < spa_version(spa) ||
1756 !SPA_VERSION_IS_SUPPORTED(zc->zc_cookie)) {
1757 spa_close(spa, FTAG);
1758 return (SET_ERROR(EINVAL));
1759 }
1760
1761 spa_upgrade(spa, zc->zc_cookie);
1762 spa_close(spa, FTAG);
1763
1764 return (error);
1765 }
1766
1767 static int
zfs_ioc_pool_get_history(zfs_cmd_t * zc)1768 zfs_ioc_pool_get_history(zfs_cmd_t *zc)
1769 {
1770 spa_t *spa;
1771 char *hist_buf;
1772 uint64_t size;
1773 int error;
1774
1775 if ((size = zc->zc_history_len) == 0)
1776 return (SET_ERROR(EINVAL));
1777
1778 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
1779 return (error);
1780
1781 if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) {
1782 spa_close(spa, FTAG);
1783 return (SET_ERROR(ENOTSUP));
1784 }
1785
1786 hist_buf = vmem_alloc(size, KM_SLEEP);
1787 if ((error = spa_history_get(spa, &zc->zc_history_offset,
1788 &zc->zc_history_len, hist_buf)) == 0) {
1789 error = ddi_copyout(hist_buf,
1790 (void *)(uintptr_t)zc->zc_history,
1791 zc->zc_history_len, zc->zc_iflags);
1792 }
1793
1794 spa_close(spa, FTAG);
1795 vmem_free(hist_buf, size);
1796 return (error);
1797 }
1798
1799 /*
1800 * inputs:
1801 * zc_nvlist_src nvlist optionally containing ZPOOL_REGUID_GUID
1802 * zc_nvlist_src_size size of the nvlist
1803 */
1804 static int
zfs_ioc_pool_reguid(zfs_cmd_t * zc)1805 zfs_ioc_pool_reguid(zfs_cmd_t *zc)
1806 {
1807 uint64_t *guidp = NULL;
1808 nvlist_t *props = NULL;
1809 spa_t *spa;
1810 uint64_t guid;
1811 int error;
1812
1813 if (zc->zc_nvlist_src_size != 0) {
1814 error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
1815 zc->zc_iflags, &props);
1816 if (error != 0)
1817 return (error);
1818
1819 error = nvlist_lookup_uint64(props, ZPOOL_REGUID_GUID, &guid);
1820 if (error == 0)
1821 guidp = &guid;
1822 else if (error == ENOENT)
1823 guidp = NULL;
1824 else
1825 goto out;
1826 }
1827
1828 error = spa_open(zc->zc_name, &spa, FTAG);
1829 if (error == 0) {
1830 error = spa_change_guid(spa, guidp);
1831 spa_close(spa, FTAG);
1832 }
1833
1834 out:
1835 if (props != NULL)
1836 nvlist_free(props);
1837
1838 return (error);
1839 }
1840
1841 static int
zfs_ioc_dsobj_to_dsname(zfs_cmd_t * zc)1842 zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc)
1843 {
1844 return (dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value));
1845 }
1846
1847 /*
1848 * inputs:
1849 * zc_name name of filesystem
1850 * zc_obj object to find
1851 *
1852 * outputs:
1853 * zc_value name of object
1854 */
1855 static int
zfs_ioc_obj_to_path(zfs_cmd_t * zc)1856 zfs_ioc_obj_to_path(zfs_cmd_t *zc)
1857 {
1858 objset_t *os;
1859 int error;
1860
1861 /* XXX reading from objset not owned */
1862 if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE,
1863 FTAG, &os)) != 0)
1864 return (error);
1865 if (dmu_objset_type(os) != DMU_OST_ZFS) {
1866 dmu_objset_rele_flags(os, B_TRUE, FTAG);
1867 return (SET_ERROR(EINVAL));
1868 }
1869 error = zfs_obj_to_path(os, zc->zc_obj, zc->zc_value,
1870 sizeof (zc->zc_value));
1871 dmu_objset_rele_flags(os, B_TRUE, FTAG);
1872
1873 return (error);
1874 }
1875
1876 /*
1877 * inputs:
1878 * zc_name name of filesystem
1879 * zc_obj object to find
1880 *
1881 * outputs:
1882 * zc_stat stats on object
1883 * zc_value path to object
1884 */
1885 static int
zfs_ioc_obj_to_stats(zfs_cmd_t * zc)1886 zfs_ioc_obj_to_stats(zfs_cmd_t *zc)
1887 {
1888 objset_t *os;
1889 int error;
1890
1891 /* XXX reading from objset not owned */
1892 if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE,
1893 FTAG, &os)) != 0)
1894 return (error);
1895 if (dmu_objset_type(os) != DMU_OST_ZFS) {
1896 dmu_objset_rele_flags(os, B_TRUE, FTAG);
1897 return (SET_ERROR(EINVAL));
1898 }
1899 error = zfs_obj_to_stats(os, zc->zc_obj, &zc->zc_stat, zc->zc_value,
1900 sizeof (zc->zc_value));
1901 dmu_objset_rele_flags(os, B_TRUE, FTAG);
1902
1903 return (error);
1904 }
1905
1906 static int
1907 zfs_ioc_vdev_add(zfs_cmd_t *zc)
1908 {
1909 spa_t *spa;
1910 int error;
1911 nvlist_t *config;
1912
1913 error = spa_open(zc->zc_name, &spa, FTAG);
1914 if (error != 0)
1915 return (error);
1916
1917 error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
1918 zc->zc_iflags, &config);
1919 if (error == 0) {
1920 error = spa_vdev_add(spa, config, zc->zc_flags);
1921 nvlist_free(config);
1922 }
1923 spa_close(spa, FTAG);
1924 return (error);
1925 }
1926
1927 /*
1928 * inputs:
1929 * zc_name name of the pool
1930 * zc_guid guid of vdev to remove
1931 * zc_cookie cancel removal
1932 */
1933 static int
1934 zfs_ioc_vdev_remove(zfs_cmd_t *zc)
1935 {
1936 spa_t *spa;
1937 int error;
1938
1939 error = spa_open(zc->zc_name, &spa, FTAG);
1940 if (error != 0)
1941 return (error);
1942 if (zc->zc_cookie != 0) {
1943 error = spa_vdev_remove_cancel(spa);
1944 } else {
1945 error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE);
1946 }
1947 spa_close(spa, FTAG);
1948 return (error);
1949 }
1950
1951 static int
1952 zfs_ioc_vdev_set_state(zfs_cmd_t *zc)
1953 {
1954 spa_t *spa;
1955 int error;
1956 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
1957
1958 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
1959 return (error);
1960 switch (zc->zc_cookie) {
1961 case VDEV_STATE_ONLINE:
1962 error = vdev_online(spa, zc->zc_guid, zc->zc_obj, &newstate);
1963 break;
1964
1965 case VDEV_STATE_OFFLINE:
1966 error = vdev_offline(spa, zc->zc_guid, zc->zc_obj);
1967 break;
1968
1969 case VDEV_STATE_FAULTED:
1970 if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED &&
1971 zc->zc_obj != VDEV_AUX_EXTERNAL &&
1972 zc->zc_obj != VDEV_AUX_EXTERNAL_PERSIST)
1973 zc->zc_obj = VDEV_AUX_ERR_EXCEEDED;
1974
1975 error = vdev_fault(spa, zc->zc_guid, zc->zc_obj);
1976 break;
1977
1978 case VDEV_STATE_DEGRADED:
1979 if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED &&
1980 zc->zc_obj != VDEV_AUX_EXTERNAL)
1981 zc->zc_obj = VDEV_AUX_ERR_EXCEEDED;
1982
1983 error = vdev_degrade(spa, zc->zc_guid, zc->zc_obj);
1984 break;
1985
1986 case VDEV_STATE_REMOVED:
1987 error = vdev_remove_wanted(spa, zc->zc_guid);
1988 break;
1989
1990 default:
1991 error = SET_ERROR(EINVAL);
1992 }
1993 zc->zc_cookie = newstate;
1994 spa_close(spa, FTAG);
1995 return (error);
1996 }
1997
1998 static int
1999 zfs_ioc_vdev_attach(zfs_cmd_t *zc)
2000 {
2001 spa_t *spa;
2002 nvlist_t *config;
2003 int replacing = zc->zc_cookie;
2004 int rebuild = zc->zc_simple;
2005 int error;
2006
2007 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
2008 return (error);
2009
2010 if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
2011 zc->zc_iflags, &config)) == 0) {
2012 error = spa_vdev_attach(spa, zc->zc_guid, config, replacing,
2013 rebuild);
2014 nvlist_free(config);
2015 }
2016
2017 spa_close(spa, FTAG);
2018 return (error);
2019 }
2020
2021 static int
2022 zfs_ioc_vdev_detach(zfs_cmd_t *zc)
2023 {
2024 spa_t *spa;
2025 int error;
2026
2027 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
2028 return (error);
2029
2030 error = spa_vdev_detach(spa, zc->zc_guid, 0, B_FALSE);
2031
2032 spa_close(spa, FTAG);
2033 return (error);
2034 }
2035
2036 static int
2037 zfs_ioc_vdev_split(zfs_cmd_t *zc)
2038 {
2039 spa_t *spa;
2040 nvlist_t *config, *props = NULL;
2041 int error;
2042 boolean_t exp = !!(zc->zc_cookie & ZPOOL_EXPORT_AFTER_SPLIT);
2043
2044 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
2045 return (error);
2046
2047 if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
2048 zc->zc_iflags, &config))) {
2049 spa_close(spa, FTAG);
2050 return (error);
2051 }
2052
2053 if (zc->zc_nvlist_src_size != 0 && (error =
2054 get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
2055 zc->zc_iflags, &props))) {
2056 spa_close(spa, FTAG);
2057 nvlist_free(config);
2058 return (error);
2059 }
2060
2061 error = spa_vdev_split_mirror(spa, zc->zc_string, config, props, exp);
2062
2063 spa_close(spa, FTAG);
2064
2065 nvlist_free(config);
2066 nvlist_free(props);
2067
2068 return (error);
2069 }
2070
2071 static int
2072 zfs_ioc_vdev_setpath(zfs_cmd_t *zc)
2073 {
2074 spa_t *spa;
2075 const char *path = zc->zc_value;
2076 uint64_t guid = zc->zc_guid;
2077 int error;
2078
2079 error = spa_open(zc->zc_name, &spa, FTAG);
2080 if (error != 0)
2081 return (error);
2082
2083 error = spa_vdev_setpath(spa, guid, path);
2084 spa_close(spa, FTAG);
2085 return (error);
2086 }
2087
2088 static int
2089 zfs_ioc_vdev_setfru(zfs_cmd_t *zc)
2090 {
2091 spa_t *spa;
2092 const char *fru = zc->zc_value;
2093 uint64_t guid = zc->zc_guid;
2094 int error;
2095
2096 error = spa_open(zc->zc_name, &spa, FTAG);
2097 if (error != 0)
2098 return (error);
2099
2100 error = spa_vdev_setfru(spa, guid, fru);
2101 spa_close(spa, FTAG);
2102 return (error);
2103 }
2104
2105 static int
2106 zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os)
2107 {
2108 int error = 0;
2109 nvlist_t *nv;
2110
2111 dmu_objset_fast_stat(os, &zc->zc_objset_stats);
2112
2113 if (!zc->zc_simple && zc->zc_nvlist_dst != 0 &&
2114 (error = dsl_prop_get_all(os, &nv)) == 0) {
2115 dmu_objset_stats(os, nv);
2116 /*
2117 * NB: zvol_get_stats() will read the objset contents,
2118 * which we aren't supposed to do with a
2119 * DS_MODE_USER hold, because it could be
2120 * inconsistent. So this is a bit of a workaround...
2121 * XXX reading without owning
2122 */
2123 if (!zc->zc_objset_stats.dds_inconsistent &&
2124 dmu_objset_type(os) == DMU_OST_ZVOL) {
2125 error = zvol_get_stats(os, nv);
2126 if (error == EIO) {
2127 nvlist_free(nv);
2128 return (error);
2129 }
2130 VERIFY0(error);
2131 }
2132 if (error == 0)
2133 error = put_nvlist(zc, nv);
2134 nvlist_free(nv);
2135 }
2136
2137 return (error);
2138 }
2139
2140 /*
2141 * inputs:
2142 * zc_name name of filesystem
2143 * zc_nvlist_dst_size size of buffer for property nvlist
2144 *
2145 * outputs:
2146 * zc_objset_stats stats
2147 * zc_nvlist_dst property nvlist
2148 * zc_nvlist_dst_size size of property nvlist
2149 */
2150 static int
2151 zfs_ioc_objset_stats(zfs_cmd_t *zc)
2152 {
2153 objset_t *os;
2154 int error;
2155
2156 error = dmu_objset_hold(zc->zc_name, FTAG, &os);
2157 if (error == 0) {
2158 error = zfs_ioc_objset_stats_impl(zc, os);
2159 dmu_objset_rele(os, FTAG);
2160 }
2161
2162 return (error);
2163 }
2164
2165 /*
2166 * inputs:
2167 * zc_name name of filesystem
2168 * zc_nvlist_dst_size size of buffer for property nvlist
2169 *
2170 * outputs:
2171 * zc_nvlist_dst received property nvlist
2172 * zc_nvlist_dst_size size of received property nvlist
2173 *
2174 * Gets received properties (distinct from local properties on or after
2175 * SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from
2176 * local property values.
2177 */
2178 static int
2179 zfs_ioc_objset_recvd_props(zfs_cmd_t *zc)
2180 {
2181 int error = 0;
2182 nvlist_t *nv;
2183
2184 /*
2185 * Without this check, we would return local property values if the
2186 * caller has not already received properties on or after
2187 * SPA_VERSION_RECVD_PROPS.
2188 */
2189 if (!dsl_prop_get_hasrecvd(zc->zc_name))
2190 return (SET_ERROR(ENOTSUP));
2191
2192 if (zc->zc_nvlist_dst != 0 &&
2193 (error = dsl_prop_get_received(zc->zc_name, &nv)) == 0) {
2194 error = put_nvlist(zc, nv);
2195 nvlist_free(nv);
2196 }
2197
2198 return (error);
2199 }
2200
2201 static int
2202 nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop)
2203 {
2204 uint64_t value;
2205 int error;
2206
2207 /*
2208 * zfs_get_zplprop() will either find a value or give us
2209 * the default value (if there is one).
2210 */
2211 if ((error = zfs_get_zplprop(os, prop, &value)) != 0)
2212 return (error);
2213 VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0);
2214 return (0);
2215 }
2216
2217 /*
2218 * inputs:
2219 * zc_name name of filesystem
2220 * zc_nvlist_dst_size size of buffer for zpl property nvlist
2221 *
2222 * outputs:
2223 * zc_nvlist_dst zpl property nvlist
2224 * zc_nvlist_dst_size size of zpl property nvlist
2225 */
2226 static int
2227 zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
2228 {
2229 objset_t *os;
2230 int err;
2231
2232 /* XXX reading without owning */
2233 if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os)))
2234 return (err);
2235
2236 dmu_objset_fast_stat(os, &zc->zc_objset_stats);
2237
2238 /*
2239 * NB: nvl_add_zplprop() will read the objset contents,
2240 * which we aren't supposed to do with a DS_MODE_USER
2241 * hold, because it could be inconsistent.
2242 */
2243 if (zc->zc_nvlist_dst != 0 &&
2244 !zc->zc_objset_stats.dds_inconsistent &&
2245 dmu_objset_type(os) == DMU_OST_ZFS) {
2246 nvlist_t *nv;
2247
2248 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2249 if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 &&
2250 (err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 &&
2251 (err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 &&
2252 (err = nvl_add_zplprop(os, nv, ZFS_PROP_CASE)) == 0)
2253 err = put_nvlist(zc, nv);
2254 nvlist_free(nv);
2255 } else {
2256 err = SET_ERROR(ENOENT);
2257 }
2258 dmu_objset_rele(os, FTAG);
2259 return (err);
2260 }
2261
2262 /*
2263 * inputs:
2264 * zc_name name of filesystem
2265 * zc_cookie zap cursor
2266 * zc_nvlist_dst_size size of buffer for property nvlist
2267 *
2268 * outputs:
2269 * zc_name name of next filesystem
2270 * zc_cookie zap cursor
2271 * zc_objset_stats stats
2272 * zc_nvlist_dst property nvlist
2273 * zc_nvlist_dst_size size of property nvlist
2274 */
2275 static int
2276 zfs_ioc_dataset_list_next(zfs_cmd_t *zc)
2277 {
2278 objset_t *os;
2279 int error;
2280 char *p;
2281 size_t orig_len = strlen(zc->zc_name);
2282
2283 top:
2284 if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) {
2285 if (error == ENOENT)
2286 error = SET_ERROR(ESRCH);
2287 return (error);
2288 }
2289
2290 p = strrchr(zc->zc_name, '/');
2291 if (p == NULL || p[1] != '\0')
2292 (void) strlcat(zc->zc_name, "/", sizeof (zc->zc_name));
2293 p = zc->zc_name + strlen(zc->zc_name);
2294
2295 do {
2296 error = dmu_dir_list_next(os,
2297 sizeof (zc->zc_name) - (p - zc->zc_name), p,
2298 NULL, &zc->zc_cookie);
2299 if (error == ENOENT)
2300 error = SET_ERROR(ESRCH);
2301 } while (error == 0 && zfs_dataset_name_hidden(zc->zc_name));
2302 dmu_objset_rele(os, FTAG);
2303
2304 /*
2305 * If it's an internal dataset (i.e. one with a '$' in its name),
2306 * don't try to get stats for it, otherwise we'll return ENOENT.
2307 */
2308 if (error == 0 && strchr(zc->zc_name, '$') == NULL) {
2309 error = zfs_ioc_objset_stats(zc); /* fill in the stats */
2310 if (error == ENOENT) {
2311 /* We lost a race with destroy, get the next one. */
2312 zc->zc_name[orig_len] = '\0';
2313 goto top;
2314 }
2315 }
2316 return (error);
2317 }
2318
2319 /*
2320 * inputs:
2321 * zc_name name of filesystem
2322 * zc_cookie zap cursor
2323 * zc_nvlist_src iteration range nvlist
2324 * zc_nvlist_src_size size of iteration range nvlist
2325 *
2326 * outputs:
2327 * zc_name name of next snapshot
2328 * zc_objset_stats stats
2329 * zc_nvlist_dst property nvlist
2330 * zc_nvlist_dst_size size of property nvlist
2331 */
2332 static int
2333 zfs_ioc_snapshot_list_next(zfs_cmd_t *zc)
2334 {
2335 int error;
2336 objset_t *os, *ossnap;
2337 dsl_dataset_t *ds;
2338 uint64_t min_txg = 0, max_txg = 0;
2339
2340 if (zc->zc_nvlist_src_size != 0) {
2341 nvlist_t *props = NULL;
2342 error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
2343 zc->zc_iflags, &props);
2344 if (error != 0)
2345 return (error);
2346 (void) nvlist_lookup_uint64(props, SNAP_ITER_MIN_TXG,
2347 &min_txg);
2348 (void) nvlist_lookup_uint64(props, SNAP_ITER_MAX_TXG,
2349 &max_txg);
2350 nvlist_free(props);
2351 }
2352
2353 error = dmu_objset_hold(zc->zc_name, FTAG, &os);
2354 if (error != 0) {
2355 return (error == ENOENT ? SET_ERROR(ESRCH) : error);
2356 }
2357
2358 /*
2359 * A dataset name of maximum length cannot have any snapshots,
2360 * so exit immediately.
2361 */
2362 if (strlcat(zc->zc_name, "@", sizeof (zc->zc_name)) >=
2363 ZFS_MAX_DATASET_NAME_LEN) {
2364 dmu_objset_rele(os, FTAG);
2365 return (SET_ERROR(ESRCH));
2366 }
2367
2368 while (error == 0) {
2369 if (issig()) {
2370 error = SET_ERROR(EINTR);
2371 break;
2372 }
2373
2374 error = dmu_snapshot_list_next(os,
2375 sizeof (zc->zc_name) - strlen(zc->zc_name),
2376 zc->zc_name + strlen(zc->zc_name), &zc->zc_obj,
2377 &zc->zc_cookie, NULL);
2378 if (error == ENOENT) {
2379 error = SET_ERROR(ESRCH);
2380 break;
2381 } else if (error != 0) {
2382 break;
2383 }
2384
2385 error = dsl_dataset_hold_obj(dmu_objset_pool(os), zc->zc_obj,
2386 FTAG, &ds);
2387 if (error != 0)
2388 break;
2389
2390 if ((min_txg != 0 && dsl_get_creationtxg(ds) < min_txg) ||
2391 (max_txg != 0 && dsl_get_creationtxg(ds) > max_txg)) {
2392 dsl_dataset_rele(ds, FTAG);
2393 /* undo snapshot name append */
2394 *(strchr(zc->zc_name, '@') + 1) = '\0';
2395 /* skip snapshot */
2396 continue;
2397 }
2398
2399 if (zc->zc_simple) {
2400 dsl_dataset_fast_stat(ds, &zc->zc_objset_stats);
2401 dsl_dataset_rele(ds, FTAG);
2402 break;
2403 }
2404
2405 if ((error = dmu_objset_from_ds(ds, &ossnap)) != 0) {
2406 dsl_dataset_rele(ds, FTAG);
2407 break;
2408 }
2409 if ((error = zfs_ioc_objset_stats_impl(zc, ossnap)) != 0) {
2410 dsl_dataset_rele(ds, FTAG);
2411 break;
2412 }
2413 dsl_dataset_rele(ds, FTAG);
2414 break;
2415 }
2416
2417 dmu_objset_rele(os, FTAG);
2418 /* if we failed, undo the @ that we tacked on to zc_name */
2419 if (error != 0)
2420 *strchr(zc->zc_name, '@') = '\0';
2421 return (error);
2422 }
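
/*
 * Illustrative sketch (not compiled here): constructing the optional
 * iteration-range nvlist consumed above.  Snapshots whose creation txg falls
 * outside [min, max] are skipped; the txg values below are placeholders, and
 * the caller is expected to pack this list into zc_nvlist_src/_size.
 *
 *	nvlist_t *range = fnvlist_alloc();
 *	fnvlist_add_uint64(range, SNAP_ITER_MIN_TXG, 1000);
 *	fnvlist_add_uint64(range, SNAP_ITER_MAX_TXG, 2000);
 *	// pack and attach to zc_nvlist_src / zc_nvlist_src_size
 *	fnvlist_free(range);
 */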
2423
2424 static int
2425 zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
2426 {
2427 const char *propname = nvpair_name(pair);
2428 uint64_t *valary;
2429 unsigned int vallen;
2430 const char *dash, *domain;
2431 zfs_userquota_prop_t type;
2432 uint64_t rid;
2433 uint64_t quota;
2434 zfsvfs_t *zfsvfs;
2435 int err;
2436
2437 if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
2438 nvlist_t *attrs;
2439 VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
2440 if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
2441 &pair) != 0)
2442 return (SET_ERROR(EINVAL));
2443 }
2444
2445 /*
2446 * A correctly constructed propname is encoded as
2447 * userquota@<rid>-<domain>.
2448 */
2449 if ((dash = strchr(propname, '-')) == NULL ||
2450 nvpair_value_uint64_array(pair, &valary, &vallen) != 0 ||
2451 vallen != 3)
2452 return (SET_ERROR(EINVAL));
2453
2454 domain = dash + 1;
2455 type = valary[0];
2456 rid = valary[1];
2457 quota = valary[2];
2458
2459 err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE);
2460 if (err == 0) {
2461 err = zfs_set_userquota(zfsvfs, type, domain, rid, quota);
2462 zfsvfs_rele(zfsvfs, FTAG);
2463 }
2464
2465 return (err);
2466 }
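
/*
 * Illustrative sketch (not compiled here): how a caller encodes one of these
 * properties.  The value is a three-element uint64 array of {type, rid,
 * quota}; the property name follows the userquota@<rid>-<domain> convention
 * described above.  The rid, domain, and 10 GiB quota below are placeholders.
 *
 *	uint64_t val[3] = { ZFS_PROP_USERQUOTA, 1001, 10ULL << 30 };
 *	nvlist_t *nvl = fnvlist_alloc();
 *	fnvlist_add_uint64_array(nvl, "userquota@1001-example.com", val, 3);
 *	// pass nvl through the property-setting path for the target dataset
 *	fnvlist_free(nvl);
 */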
2467
2468 /*
2469 * If the named property is one that has a special function to set its value,
2470 * return 0 on success and a positive error code on failure; otherwise if it is
2471 * not one of the special properties handled by this function, return -1.
2472 *
2473 * XXX: It would be better for callers of the property interface if we handled
2474 * these special cases in dsl_prop.c (in the dsl layer).
2475 */
2476 static int
2477 zfs_prop_set_special(const char *dsname, zprop_source_t source,
2478 nvpair_t *pair)
2479 {
2480 const char *propname = nvpair_name(pair);
2481 zfs_prop_t prop = zfs_name_to_prop(propname);
2482 uint64_t intval = 0;
2483 const char *strval = NULL;
2484 int err = -1;
2485
2486 if (prop == ZPROP_USERPROP) {
2487 if (zfs_prop_userquota(propname))
2488 return (zfs_prop_set_userquota(dsname, pair));
2489 return (-1);
2490 }
2491
2492 if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
2493 nvlist_t *attrs;
2494 VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
2495 VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
2496 &pair) == 0);
2497 }
2498
2499 /* all special properties are numeric except for keylocation */
2500 if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
2501 strval = fnvpair_value_string(pair);
2502 } else {
2503 intval = fnvpair_value_uint64(pair);
2504 }
2505
2506 switch (prop) {
2507 case ZFS_PROP_QUOTA:
2508 err = dsl_dir_set_quota(dsname, source, intval);
2509 break;
2510 case ZFS_PROP_REFQUOTA:
2511 err = dsl_dataset_set_refquota(dsname, source, intval);
2512 break;
2513 case ZFS_PROP_FILESYSTEM_LIMIT:
2514 case ZFS_PROP_SNAPSHOT_LIMIT:
2515 if (intval == UINT64_MAX) {
2516 /* clearing the limit, just do it */
2517 err = 0;
2518 } else {
2519 err = dsl_dir_activate_fs_ss_limit(dsname);
2520 }
2521 /*
2522 * Set err to -1 to force the zfs_set_prop_nvlist code down the
2523 * default path to set the value in the nvlist.
2524 */
2525 if (err == 0)
2526 err = -1;
2527 break;
2528 case ZFS_PROP_KEYLOCATION:
2529 err = dsl_crypto_can_set_keylocation(dsname, strval);
2530
2531 /*
2532 * Set err to -1 to force the zfs_set_prop_nvlist code down the
2533 * default path to set the value in the nvlist.
2534 */
2535 if (err == 0)
2536 err = -1;
2537 break;
2538 case ZFS_PROP_RESERVATION:
2539 err = dsl_dir_set_reservation(dsname, source, intval);
2540 break;
2541 case ZFS_PROP_REFRESERVATION:
2542 err = dsl_dataset_set_refreservation(dsname, source, intval);
2543 break;
2544 case ZFS_PROP_COMPRESSION:
2545 err = dsl_dataset_set_compression(dsname, source, intval);
2546 /*
2547 * Set err to -1 to force the zfs_set_prop_nvlist code down the
2548 * default path to set the value in the nvlist.
2549 */
2550 if (err == 0)
2551 err = -1;
2552 break;
2553 case ZFS_PROP_VOLSIZE:
2554 err = zvol_set_volsize(dsname, intval);
2555 break;
2556 case ZFS_PROP_VOLTHREADING:
2557 err = zvol_set_volthreading(dsname, intval);
2558 /*
2559 * Set err to -1 to force the zfs_set_prop_nvlist code down the
2560 * default path to set the value in the nvlist.
2561 */
2562 if (err == 0)
2563 err = -1;
2564 break;
2565 case ZFS_PROP_SNAPDEV:
2566 case ZFS_PROP_VOLMODE:
2567 err = zvol_set_common(dsname, prop, source, intval);
2568 break;
2569 case ZFS_PROP_READONLY:
2570 err = zvol_set_ro(dsname, intval);
2571 /*
2572 * Set err to -1 to force the zfs_set_prop_nvlist code down the
2573 * default path to set the value in the nvlist.
2574 */
2575 if (err == 0)
2576 err = -1;
2577 break;
2578 case ZFS_PROP_VERSION:
2579 {
2580 zfsvfs_t *zfsvfs;
2581
2582 if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
2583 break;
2584
2585 err = zfs_set_version(zfsvfs, intval);
2586 zfsvfs_rele(zfsvfs, FTAG);
2587
2588 if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {
2589 zfs_cmd_t *zc;
2590
2591 zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
2592 (void) strlcpy(zc->zc_name, dsname,
2593 sizeof (zc->zc_name));
2594 (void) zfs_ioc_userspace_upgrade(zc);
2595 (void) zfs_ioc_id_quota_upgrade(zc);
2596 kmem_free(zc, sizeof (zfs_cmd_t));
2597 }
2598 break;
2599 }
2600 case ZFS_PROP_LONGNAME:
2601 {
2602 zfsvfs_t *zfsvfs;
2603
2604 /*
2605 * Ignore the checks if the property is being applied as part of
2606 * 'zfs receive', because dmu_recv_begin_check() has already
2607 * verified that the local pool has SPA_FEATURE_LONGNAME enabled.
2608 */
2609 if (source == ZPROP_SRC_RECEIVED) {
2610 cmn_err(CE_NOTE, "Skipping ZFS_PROP_LONGNAME checks "
2611 "for dsname=%s\n", dsname);
2612 err = -1;
2613 break;
2614 }
2615
2616 if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE)) != 0) {
2617 cmn_err(CE_WARN, "%s:%d Failed to hold for dsname=%s "
2618 "err=%d\n", __FILE__, __LINE__, dsname, err);
2619 break;
2620 }
2621
2622 if (!spa_feature_is_enabled(zfsvfs->z_os->os_spa,
2623 SPA_FEATURE_LONGNAME)) {
2624 err = ENOTSUP;
2625 } else {
2626 /*
2627 * Set err to -1 to force the zfs_set_prop_nvlist code
2628 * down the default path to set the value in the nvlist.
2629 */
2630 err = -1;
2631 }
2632 zfsvfs_rele(zfsvfs, FTAG);
2633 break;
2634 }
2635 default:
2636 err = -1;
2637 }
2638
2639 return (err);
2640 }
2641
2642 static boolean_t
2643 zfs_is_namespace_prop(zfs_prop_t prop)
2644 {
2645 switch (prop) {
2646
2647 case ZFS_PROP_ATIME:
2648 case ZFS_PROP_RELATIME:
2649 case ZFS_PROP_DEVICES:
2650 case ZFS_PROP_EXEC:
2651 case ZFS_PROP_SETUID:
2652 case ZFS_PROP_READONLY:
2653 case ZFS_PROP_XATTR:
2654 case ZFS_PROP_NBMAND:
2655 return (B_TRUE);
2656
2657 default:
2658 return (B_FALSE);
2659 }
2660 }
2661
2662 /*
2663 * This function is best effort. If it fails to set any of the given properties,
2664 * it continues to set as many as it can and returns the last error
2665 * encountered. If the caller provides a non-NULL errlist, it will be filled in
2666 * with the list of names of all the properties that failed along with the
2667 * corresponding error numbers.
2668 *
2669 * If every property is set successfully, zero is returned and errlist is not
2670 * modified.
2671 */
2672 int
2673 zfs_set_prop_nvlist(const char *dsname, zprop_source_t source, nvlist_t *nvl,
2674 nvlist_t *errlist)
2675 {
2676 nvpair_t *pair;
2677 nvpair_t *propval;
2678 int rv = 0;
2679 int err;
2680 uint64_t intval;
2681 const char *strval;
2682 boolean_t should_update_mount_cache = B_FALSE;
2683
2684 nvlist_t *genericnvl = fnvlist_alloc();
2685 nvlist_t *retrynvl = fnvlist_alloc();
2686 retry:
2687 pair = NULL;
2688 while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
2689 const char *propname = nvpair_name(pair);
2690 zfs_prop_t prop = zfs_name_to_prop(propname);
2691 err = 0;
2692
2693 /* decode the property value */
2694 propval = pair;
2695 if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
2696 nvlist_t *attrs;
2697 attrs = fnvpair_value_nvlist(pair);
2698 if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
2699 &propval) != 0)
2700 err = SET_ERROR(EINVAL);
2701 }
2702
2703 /* Validate value type */
2704 if (err == 0 && source == ZPROP_SRC_INHERITED) {
2705 /* inherited properties are expected to be booleans */
2706 if (nvpair_type(propval) != DATA_TYPE_BOOLEAN)
2707 err = SET_ERROR(EINVAL);
2708 } else if (err == 0 && prop == ZPROP_USERPROP) {
2709 if (zfs_prop_user(propname)) {
2710 if (nvpair_type(propval) != DATA_TYPE_STRING)
2711 err = SET_ERROR(EINVAL);
2712 } else if (zfs_prop_userquota(propname)) {
2713 if (nvpair_type(propval) !=
2714 DATA_TYPE_UINT64_ARRAY)
2715 err = SET_ERROR(EINVAL);
2716 } else {
2717 err = SET_ERROR(EINVAL);
2718 }
2719 } else if (err == 0) {
2720 if (nvpair_type(propval) == DATA_TYPE_STRING) {
2721 if (zfs_prop_get_type(prop) != PROP_TYPE_STRING)
2722 err = SET_ERROR(EINVAL);
2723 } else if (nvpair_type(propval) == DATA_TYPE_UINT64) {
2724 const char *unused;
2725
2726 intval = fnvpair_value_uint64(propval);
2727
2728 switch (zfs_prop_get_type(prop)) {
2729 case PROP_TYPE_NUMBER:
2730 break;
2731 case PROP_TYPE_STRING:
2732 err = SET_ERROR(EINVAL);
2733 break;
2734 case PROP_TYPE_INDEX:
2735 if (zfs_prop_index_to_string(prop,
2736 intval, &unused) != 0)
2737 err =
2738 SET_ERROR(ZFS_ERR_BADPROP);
2739 break;
2740 default:
2741 cmn_err(CE_PANIC,
2742 "unknown property type");
2743 }
2744 } else {
2745 err = SET_ERROR(EINVAL);
2746 }
2747 }
2748
2749 /* Validate permissions */
2750 if (err == 0)
2751 err = zfs_check_settable(dsname, pair, CRED());
2752
2753 if (err == 0) {
2754 if (source == ZPROP_SRC_INHERITED)
2755 err = -1; /* does not need special handling */
2756 else
2757 err = zfs_prop_set_special(dsname, source,
2758 pair);
2759 if (err == -1) {
2760 /*
2761 * For better performance we build up a list of
2762 * properties to set in a single transaction.
2763 */
2764 err = nvlist_add_nvpair(genericnvl, pair);
2765 } else if (err != 0 && nvl != retrynvl) {
2766 /*
2767 * This may be a spurious error caused by
2768 * receiving quota and reservation out of order.
2769 * Try again in a second pass.
2770 */
2771 err = nvlist_add_nvpair(retrynvl, pair);
2772 }
2773 }
2774
2775 if (err != 0) {
2776 if (errlist != NULL)
2777 fnvlist_add_int32(errlist, propname, err);
2778 rv = err;
2779 }
2780
2781 if (zfs_is_namespace_prop(prop))
2782 should_update_mount_cache = B_TRUE;
2783 }
2784
2785 if (nvl != retrynvl && !nvlist_empty(retrynvl)) {
2786 nvl = retrynvl;
2787 goto retry;
2788 }
2789
2790 if (nvlist_empty(genericnvl))
2791 goto out;
2792
2793 /*
2794 * Try to set them all in one batch.
2795 */
2796 err = dsl_props_set(dsname, source, genericnvl);
2797 if (err == 0)
2798 goto out;
2799
2800 /*
2801 * If batching fails, we still want to set as many properties as we
2802 * can, so try setting them individually.
2803 */
2804 pair = NULL;
2805 while ((pair = nvlist_next_nvpair(genericnvl, pair)) != NULL) {
2806 const char *propname = nvpair_name(pair);
2807
2808 propval = pair;
2809 if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
2810 nvlist_t *attrs;
2811 attrs = fnvpair_value_nvlist(pair);
2812 propval = fnvlist_lookup_nvpair(attrs, ZPROP_VALUE);
2813 }
2814
2815 if (nvpair_type(propval) == DATA_TYPE_STRING) {
2816 strval = fnvpair_value_string(propval);
2817 err = dsl_prop_set_string(dsname, propname,
2818 source, strval);
2819 } else if (nvpair_type(propval) == DATA_TYPE_BOOLEAN) {
2820 err = dsl_prop_inherit(dsname, propname, source);
2821 } else {
2822 intval = fnvpair_value_uint64(propval);
2823 err = dsl_prop_set_int(dsname, propname, source,
2824 intval);
2825 }
2826
2827 if (err != 0) {
2828 if (errlist != NULL) {
2829 fnvlist_add_int32(errlist, propname, err);
2830 }
2831 rv = err;
2832 }
2833 }
2834
2835 out:
2836 if (should_update_mount_cache)
2837 zfs_ioctl_update_mount_cache(dsname);
2838
2839 nvlist_free(genericnvl);
2840 nvlist_free(retrynvl);
2841
2842 return (rv);
2843 }
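
/*
 * Illustrative sketch of how in-kernel callers use this interface: build a
 * property nvlist, pass an optional errlist, and walk the per-property error
 * codes on partial failure.  The dataset name and 1 GiB quota are
 * placeholders for this sketch.
 *
 *	nvlist_t *nvl = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_QUOTA), 1ULL << 30);
 *	if (zfs_set_prop_nvlist("tank/fs", ZPROP_SRC_LOCAL, nvl, errlist) != 0) {
 *		for (nvpair_t *p = nvlist_next_nvpair(errlist, NULL);
 *		    p != NULL; p = nvlist_next_nvpair(errlist, p))
 *			cmn_err(CE_NOTE, "could not set %s", nvpair_name(p));
 *	}
 *	fnvlist_free(errlist);
 *	fnvlist_free(nvl);
 */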
2844
2845 /*
2846 * Check that all the properties are valid user properties.
2847 */
2848 static int
2849 zfs_check_userprops(nvlist_t *nvl)
2850 {
2851 nvpair_t *pair = NULL;
2852
2853 while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
2854 const char *propname = nvpair_name(pair);
2855
2856 if (!zfs_prop_user(propname) ||
2857 nvpair_type(pair) != DATA_TYPE_STRING)
2858 return (SET_ERROR(EINVAL));
2859
2860 if (strlen(propname) >= ZAP_MAXNAMELEN)
2861 return (SET_ERROR(ENAMETOOLONG));
2862
2863 if (strlen(fnvpair_value_string(pair)) >= ZAP_MAXVALUELEN)
2864 return (SET_ERROR(E2BIG));
2865 }
2866 return (0);
2867 }
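
/*
 * Illustrative sketch (not compiled here): a props nvlist that passes the
 * checks above -- every name is in the user-property namespace (contains a
 * ':') and every value is a string within the ZAP limits.  The names and
 * values are placeholders.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_string(props, "com.example:purpose", "scratch data");
 *	fnvlist_add_string(props, "com.example:owner", "ops");
 *	int err = zfs_check_userprops(props);	// 0 for this list
 *	fnvlist_free(props);
 */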
2868
2869 static void
2870 props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops)
2871 {
2872 nvpair_t *pair;
2873
2874 VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2875
2876 pair = NULL;
2877 while ((pair = nvlist_next_nvpair(props, pair)) != NULL) {
2878 if (nvlist_exists(skipped, nvpair_name(pair)))
2879 continue;
2880
2881 VERIFY(nvlist_add_nvpair(*newprops, pair) == 0);
2882 }
2883 }
2884
2885 static int
2886 clear_received_props(const char *dsname, nvlist_t *props,
2887 nvlist_t *skipped)
2888 {
2889 int err = 0;
2890 nvlist_t *cleared_props = NULL;
2891 props_skip(props, skipped, &cleared_props);
2892 if (!nvlist_empty(cleared_props)) {
2893 /*
2894 * Acts on local properties until the dataset has received
2895 * properties at least once on or after SPA_VERSION_RECVD_PROPS.
2896 */
2897 zprop_source_t flags = (ZPROP_SRC_NONE |
2898 (dsl_prop_get_hasrecvd(dsname) ? ZPROP_SRC_RECEIVED : 0));
2899 err = zfs_set_prop_nvlist(dsname, flags, cleared_props, NULL);
2900 }
2901 nvlist_free(cleared_props);
2902 return (err);
2903 }
2904
2905 /*
2906 * inputs:
2907 * zc_name name of filesystem
2908 * zc_value name of property to set
2909 * zc_nvlist_src{_size} nvlist of properties to apply
2910 * zc_cookie received properties flag
2911 *
2912 * outputs:
2913 * zc_nvlist_dst{_size} error for each unapplied received property
2914 */
2915 static int
2916 zfs_ioc_set_prop(zfs_cmd_t *zc)
2917 {
2918 nvlist_t *nvl;
2919 boolean_t received = zc->zc_cookie;
2920 zprop_source_t source = (received ? ZPROP_SRC_RECEIVED :
2921 ZPROP_SRC_LOCAL);
2922 nvlist_t *errors;
2923 int error;
2924
2925 if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
2926 zc->zc_iflags, &nvl)) != 0)
2927 return (error);
2928
2929 if (received) {
2930 nvlist_t *origprops;
2931
2932 if (dsl_prop_get_received(zc->zc_name, &origprops) == 0) {
2933 (void) clear_received_props(zc->zc_name,
2934 origprops, nvl);
2935 nvlist_free(origprops);
2936 }
2937
2938 error = dsl_prop_set_hasrecvd(zc->zc_name);
2939 }
2940
2941 errors = fnvlist_alloc();
2942 if (error == 0)
2943 error = zfs_set_prop_nvlist(zc->zc_name, source, nvl, errors);
2944
2945 if (zc->zc_nvlist_dst != 0 && errors != NULL) {
2946 (void) put_nvlist(zc, errors);
2947 }
2948
2949 nvlist_free(errors);
2950 nvlist_free(nvl);
2951 return (error);
2952 }
2953
2954 /*
2955 * inputs:
2956 * zc_name name of filesystem
2957 * zc_value name of property to inherit
2958 * zc_cookie revert to received value if TRUE
2959 *
2960 * outputs: none
2961 */
2962 static int
2963 zfs_ioc_inherit_prop(zfs_cmd_t *zc)
2964 {
2965 const char *propname = zc->zc_value;
2966 zfs_prop_t prop = zfs_name_to_prop(propname);
2967 boolean_t received = zc->zc_cookie;
2968 zprop_source_t source = (received
2969 ? ZPROP_SRC_NONE /* revert to received value, if any */
2970 : ZPROP_SRC_INHERITED); /* explicitly inherit */
2971 nvlist_t *dummy;
2972 nvpair_t *pair;
2973 zprop_type_t type;
2974 int err;
2975
2976 if (!received) {
2977 /*
2978 * Only check this in the non-received case. We want to allow
2979 * 'inherit -S' to revert non-inheritable properties like quota
2980 * and reservation to the received or default values even though
2981 * they are not considered inheritable.
2982 */
2983 if (prop != ZPROP_USERPROP && !zfs_prop_inheritable(prop))
2984 return (SET_ERROR(EINVAL));
2985 }
2986
2987 if (prop == ZPROP_USERPROP) {
2988 if (!zfs_prop_user(propname))
2989 return (SET_ERROR(EINVAL));
2990
2991 type = PROP_TYPE_STRING;
2992 } else if (prop == ZFS_PROP_VOLSIZE || prop == ZFS_PROP_VERSION) {
2993 return (SET_ERROR(EINVAL));
2994 } else {
2995 type = zfs_prop_get_type(prop);
2996 }
2997
2998 /*
2999 * zfs_prop_set_special() expects properties in the form of an
3000 * nvpair with type info.
3001 */
3002 dummy = fnvlist_alloc();
3003
3004 switch (type) {
3005 case PROP_TYPE_STRING:
3006 VERIFY(0 == nvlist_add_string(dummy, propname, ""));
3007 break;
3008 case PROP_TYPE_NUMBER:
3009 case PROP_TYPE_INDEX:
3010 VERIFY(0 == nvlist_add_uint64(dummy, propname, 0));
3011 break;
3012 default:
3013 err = SET_ERROR(EINVAL);
3014 goto errout;
3015 }
3016
3017 pair = nvlist_next_nvpair(dummy, NULL);
3018 if (pair == NULL) {
3019 err = SET_ERROR(EINVAL);
3020 } else {
3021 err = zfs_prop_set_special(zc->zc_name, source, pair);
3022 if (err == -1) /* property is not "special", needs handling */
3023 err = dsl_prop_inherit(zc->zc_name, zc->zc_value,
3024 source);
3025 }
3026
3027 errout:
3028 nvlist_free(dummy);
3029 return (err);
3030 }
3031
3032 static int
3033 zfs_ioc_pool_set_props(zfs_cmd_t *zc)
3034 {
3035 nvlist_t *props;
3036 spa_t *spa;
3037 int error;
3038 nvpair_t *pair;
3039
3040 if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
3041 zc->zc_iflags, &props)))
3042 return (error);
3043
3044 /*
3045 * If the only property is the configfile, then just do a spa_lookup()
3046 * to handle the faulted case.
3047 */
3048 pair = nvlist_next_nvpair(props, NULL);
3049 if (pair != NULL && strcmp(nvpair_name(pair),
3050 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE)) == 0 &&
3051 nvlist_next_nvpair(props, pair) == NULL) {
3052 mutex_enter(&spa_namespace_lock);
3053 if ((spa = spa_lookup(zc->zc_name)) != NULL) {
3054 spa_configfile_set(spa, props, B_FALSE);
3055 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
3056 }
3057 mutex_exit(&spa_namespace_lock);
3058 if (spa != NULL) {
3059 nvlist_free(props);
3060 return (0);
3061 }
3062 }
3063
3064 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) {
3065 nvlist_free(props);
3066 return (error);
3067 }
3068
3069 error = spa_prop_set(spa, props);
3070
3071 nvlist_free(props);
3072 spa_close(spa, FTAG);
3073
3074 return (error);
3075 }
3076
3077 /*
3078 * innvl: {
3079 * "get_props_names": [ "prop1", "prop2", ..., "propN" ]
3080 * }
3081 */
3082
3083 static const zfs_ioc_key_t zfs_keys_get_props[] = {
3084 { ZPOOL_GET_PROPS_NAMES, DATA_TYPE_STRING_ARRAY, ZK_OPTIONAL },
3085 };
3086
3087 static int
3088 zfs_ioc_pool_get_props(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
3089 {
3090 spa_t *spa;
3091 char **props = NULL;
3092 unsigned int n_props = 0;
3093 int error;
3094
3095 if (nvlist_lookup_string_array(innvl, ZPOOL_GET_PROPS_NAMES,
3096 &props, &n_props) != 0) {
3097 props = NULL;
3098 }
3099
3100 if ((error = spa_open(pool, &spa, FTAG)) != 0) {
3101 /*
3102 * If the pool is faulted, there may be properties we can still
3103 * get (such as altroot and cachefile), so attempt to get them
3104 * anyway.
3105 */
3106 mutex_enter(&spa_namespace_lock);
3107 if ((spa = spa_lookup(pool)) != NULL) {
3108 error = spa_prop_get(spa, outnvl);
3109 if (error == 0 && props != NULL)
3110 error = spa_prop_get_nvlist(spa, props, n_props,
3111 outnvl);
3112 }
3113 mutex_exit(&spa_namespace_lock);
3114 } else {
3115 error = spa_prop_get(spa, outnvl);
3116 if (error == 0 && props != NULL)
3117 error = spa_prop_get_nvlist(spa, props, n_props,
3118 outnvl);
3119 spa_close(spa, FTAG);
3120 }
3121
3122 return (error);
3123 }
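
/*
 * Illustrative sketch (not compiled here): building the optional innvl that
 * restricts this ioctl to a subset of pool properties.  The property names
 * are placeholders, and the fnvlist_add_string_array() helper (and the
 * const-ness of its array argument) is an assumption of this sketch.
 *
 *	const char *names[] = { "size", "capacity", "health" };
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_string_array(innvl, ZPOOL_GET_PROPS_NAMES, names, 3);
 *	// dispatch through the new-style ioctl path; outnvl comes back with
 *	// only the requested properties
 *	fnvlist_free(innvl);
 */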
3124
3125 /*
3126 * innvl: {
3127 * "vdevprops_set_vdev" -> guid
3128 * "vdevprops_set_props" -> { prop -> value }
3129 * }
3130 *
3131 * outnvl: propname -> error code (int32)
3132 */
3133 static const zfs_ioc_key_t zfs_keys_vdev_set_props[] = {
3134 {ZPOOL_VDEV_PROPS_SET_VDEV, DATA_TYPE_UINT64, 0},
3135 {ZPOOL_VDEV_PROPS_SET_PROPS, DATA_TYPE_NVLIST, 0}
3136 };
3137
3138 static int
3139 zfs_ioc_vdev_set_props(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
3140 {
3141 spa_t *spa;
3142 int error;
3143 vdev_t *vd;
3144 uint64_t vdev_guid;
3145
3146 /* Early validation */
3147 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
3148 &vdev_guid) != 0)
3149 return (SET_ERROR(EINVAL));
3150
3151 if (outnvl == NULL)
3152 return (SET_ERROR(EINVAL));
3153
3154 if ((error = spa_open(poolname, &spa, FTAG)) != 0)
3155 return (error);
3156
3157 ASSERT(spa_writeable(spa));
3158
3159 if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL) {
3160 spa_close(spa, FTAG);
3161 return (SET_ERROR(ENOENT));
3162 }
3163
3164 error = vdev_prop_set(vd, innvl, outnvl);
3165
3166 spa_close(spa, FTAG);
3167
3168 return (error);
3169 }
3170
3171 /*
3172 * innvl: {
3173 * "vdevprops_get_vdev" -> guid
3174 * (optional) "vdevprops_get_props" -> { propname -> propid }
3175 * }
3176 *
3177 * outnvl: propname -> value
3178 */
3179 static const zfs_ioc_key_t zfs_keys_vdev_get_props[] = {
3180 {ZPOOL_VDEV_PROPS_GET_VDEV, DATA_TYPE_UINT64, 0},
3181 {ZPOOL_VDEV_PROPS_GET_PROPS, DATA_TYPE_NVLIST, ZK_OPTIONAL}
3182 };
3183
3184 static int
3185 zfs_ioc_vdev_get_props(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
3186 {
3187 spa_t *spa;
3188 int error;
3189 vdev_t *vd;
3190 uint64_t vdev_guid;
3191
3192 /* Early validation */
3193 if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
3194 &vdev_guid) != 0)
3195 return (SET_ERROR(EINVAL));
3196
3197 if (outnvl == NULL)
3198 return (SET_ERROR(EINVAL));
3199
3200 if ((error = spa_open(poolname, &spa, FTAG)) != 0)
3201 return (error);
3202
3203 if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL) {
3204 spa_close(spa, FTAG);
3205 return (SET_ERROR(ENOENT));
3206 }
3207
3208 error = vdev_prop_get(vd, innvl, outnvl);
3209
3210 spa_close(spa, FTAG);
3211
3212 return (error);
3213 }
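
/*
 * Illustrative sketch (not compiled here): the minimal innvl for fetching
 * every property of one vdev -- just the vdev guid; an optional props
 * sub-nvlist can restrict the set.  The guid value is a placeholder.
 *
 *	uint64_t vdev_guid = 0x1122334455667788ULL;	// placeholder
 *	nvlist_t *innvl = fnvlist_alloc();
 *	nvlist_t *outnvl = fnvlist_alloc();
 *	fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
 *	// dispatched through the new-style ioctl path; outnvl receives
 *	// propname -> value pairs from vdev_prop_get()
 *	fnvlist_free(outnvl);
 *	fnvlist_free(innvl);
 */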
3214
3215 /*
3216 * inputs:
3217 * zc_name name of filesystem
3218 * zc_nvlist_src{_size} nvlist of delegated permissions
3219 * zc_perm_action allow/unallow flag
3220 *
3221 * outputs: none
3222 */
3223 static int
3224 zfs_ioc_set_fsacl(zfs_cmd_t *zc)
3225 {
3226 int error;
3227 nvlist_t *fsaclnv = NULL;
3228
3229 if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
3230 zc->zc_iflags, &fsaclnv)) != 0)
3231 return (error);
3232
3233 /*
3234 * Verify nvlist is constructed correctly
3235 */
3236 if (zfs_deleg_verify_nvlist(fsaclnv) != 0) {
3237 nvlist_free(fsaclnv);
3238 return (SET_ERROR(EINVAL));
3239 }
3240
3241 /*
3242 * If we don't have PRIV_SYS_MOUNT, then validate
3243 * that user is allowed to hand out each permission in
3244 * the nvlist(s)
3245 */
3246
3247 error = secpolicy_zfs(CRED());
3248 if (error != 0) {
3249 if (zc->zc_perm_action == B_FALSE) {
3250 error = dsl_deleg_can_allow(zc->zc_name,
3251 fsaclnv, CRED());
3252 } else {
3253 error = dsl_deleg_can_unallow(zc->zc_name,
3254 fsaclnv, CRED());
3255 }
3256 }
3257
3258 if (error == 0)
3259 error = dsl_deleg_set(zc->zc_name, fsaclnv, zc->zc_perm_action);
3260
3261 nvlist_free(fsaclnv);
3262 return (error);
3263 }
3264
3265 /*
3266 * inputs:
3267 * zc_name name of filesystem
3268 *
3269 * outputs:
3270 * zc_nvlist_src{_size} nvlist of delegated permissions
3271 */
3272 static int
3273 zfs_ioc_get_fsacl(zfs_cmd_t *zc)
3274 {
3275 nvlist_t *nvp;
3276 int error;
3277
3278 if ((error = dsl_deleg_get(zc->zc_name, &nvp)) == 0) {
3279 error = put_nvlist(zc, nvp);
3280 nvlist_free(nvp);
3281 }
3282
3283 return (error);
3284 }
3285
3286 static void
3287 zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3288 {
3289 zfs_creat_t *zct = arg;
3290
3291 zfs_create_fs(os, cr, zct->zct_zplprops, tx);
3292 }
3293
3294 #define ZFS_PROP_UNDEFINED ((uint64_t)-1)
3295
3296 /*
3297 * inputs:
3298 * os parent objset pointer (NULL if root fs)
3299 * fuids_ok fuids allowed in this version of the spa?
3300 * sa_ok SAs allowed in this version of the spa?
3301 * createprops list of properties requested by creator
3302 *
3303 * outputs:
3304 * zplprops values for the zplprops we attach to the master node object
3305 * is_ci true if requested file system will be purely case-insensitive
3306 *
3307 * Determine the settings for utf8only, normalization and
3308 * casesensitivity. Specific values may have been requested by the
3309 * creator and/or we can inherit values from the parent dataset. If
3310 * the file system is of too early a vintage, a creator cannot
3311 * request settings for these properties, even if the requested
3312 * setting is the default value. We don't actually want to create dsl
3313 * properties for these, so remove them from the source nvlist after
3314 * processing.
3315 */
3316 static int
3317 zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
3318 boolean_t fuids_ok, boolean_t sa_ok, nvlist_t *createprops,
3319 nvlist_t *zplprops, boolean_t *is_ci)
3320 {
3321 uint64_t sense = ZFS_PROP_UNDEFINED;
3322 uint64_t norm = ZFS_PROP_UNDEFINED;
3323 uint64_t u8 = ZFS_PROP_UNDEFINED;
3324 int error;
3325
3326 ASSERT(zplprops != NULL);
3327
3328 /* parent dataset must be a filesystem */
3329 if (os != NULL && os->os_phys->os_type != DMU_OST_ZFS)
3330 return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
3331
3332 /*
3333 * Pull out creator prop choices, if any.
3334 */
3335 if (createprops) {
3336 (void) nvlist_lookup_uint64(createprops,
3337 zfs_prop_to_name(ZFS_PROP_VERSION), &zplver);
3338 (void) nvlist_lookup_uint64(createprops,
3339 zfs_prop_to_name(ZFS_PROP_NORMALIZE), &norm);
3340 (void) nvlist_remove_all(createprops,
3341 zfs_prop_to_name(ZFS_PROP_NORMALIZE));
3342 (void) nvlist_lookup_uint64(createprops,
3343 zfs_prop_to_name(ZFS_PROP_UTF8ONLY), &u8);
3344 (void) nvlist_remove_all(createprops,
3345 zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
3346 (void) nvlist_lookup_uint64(createprops,
3347 zfs_prop_to_name(ZFS_PROP_CASE), &sense);
3348 (void) nvlist_remove_all(createprops,
3349 zfs_prop_to_name(ZFS_PROP_CASE));
3350 }
3351
3352 /*
3353 * If the zpl version requested is whacky or the file system
3354 * or pool version is too "young" to support normalization
3355 * and the creator tried to set a value for one of the props,
3356 * error out.
3357 */
3358 if ((zplver < ZPL_VERSION_INITIAL || zplver > ZPL_VERSION) ||
3359 (zplver >= ZPL_VERSION_FUID && !fuids_ok) ||
3360 (zplver >= ZPL_VERSION_SA && !sa_ok) ||
3361 (zplver < ZPL_VERSION_NORMALIZATION &&
3362 (norm != ZFS_PROP_UNDEFINED || u8 != ZFS_PROP_UNDEFINED ||
3363 sense != ZFS_PROP_UNDEFINED)))
3364 return (SET_ERROR(ENOTSUP));
3365
3366 /*
3367 * Put the version in the zplprops
3368 */
3369 VERIFY(nvlist_add_uint64(zplprops,
3370 zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0);
3371
3372 if (norm == ZFS_PROP_UNDEFINED &&
3373 (error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0)
3374 return (error);
3375 VERIFY(nvlist_add_uint64(zplprops,
3376 zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0);
3377
3378 /*
3379 * If we're normalizing, names must always be valid UTF-8 strings.
3380 */
3381 if (norm)
3382 u8 = 1;
3383 if (u8 == ZFS_PROP_UNDEFINED &&
3384 (error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0)
3385 return (error);
3386 VERIFY(nvlist_add_uint64(zplprops,
3387 zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0);
3388
3389 if (sense == ZFS_PROP_UNDEFINED &&
3390 (error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0)
3391 return (error);
3392 VERIFY(nvlist_add_uint64(zplprops,
3393 zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0);
3394
3395 if (is_ci)
3396 *is_ci = (sense == ZFS_CASE_INSENSITIVE);
3397
3398 return (0);
3399 }
3400
3401 static int
3402 zfs_fill_zplprops(const char *dataset, nvlist_t *createprops,
3403 nvlist_t *zplprops, boolean_t *is_ci)
3404 {
3405 boolean_t fuids_ok, sa_ok;
3406 uint64_t zplver = ZPL_VERSION;
3407 objset_t *os = NULL;
3408 char parentname[ZFS_MAX_DATASET_NAME_LEN];
3409 spa_t *spa;
3410 uint64_t spa_vers;
3411 int error;
3412
3413 zfs_get_parent(dataset, parentname, sizeof (parentname));
3414
3415 if ((error = spa_open(dataset, &spa, FTAG)) != 0)
3416 return (error);
3417
3418 spa_vers = spa_version(spa);
3419 spa_close(spa, FTAG);
3420
3421 zplver = zfs_zpl_version_map(spa_vers);
3422 fuids_ok = (zplver >= ZPL_VERSION_FUID);
3423 sa_ok = (zplver >= ZPL_VERSION_SA);
3424
3425 /*
3426 * Open parent object set so we can inherit zplprop values.
3427 */
3428 if ((error = dmu_objset_hold(parentname, FTAG, &os)) != 0)
3429 return (error);
3430
3431 error = zfs_fill_zplprops_impl(os, zplver, fuids_ok, sa_ok, createprops,
3432 zplprops, is_ci);
3433 dmu_objset_rele(os, FTAG);
3434 return (error);
3435 }
3436
3437 static int
3438 zfs_fill_zplprops_root(uint64_t spa_vers, nvlist_t *createprops,
3439 nvlist_t *zplprops, boolean_t *is_ci)
3440 {
3441 boolean_t fuids_ok;
3442 boolean_t sa_ok;
3443 uint64_t zplver = ZPL_VERSION;
3444 int error;
3445
3446 zplver = zfs_zpl_version_map(spa_vers);
3447 fuids_ok = (zplver >= ZPL_VERSION_FUID);
3448 sa_ok = (zplver >= ZPL_VERSION_SA);
3449
3450 error = zfs_fill_zplprops_impl(NULL, zplver, fuids_ok, sa_ok,
3451 createprops, zplprops, is_ci);
3452 return (error);
3453 }
3454
3455 /*
3456 * innvl: {
3457 * "type" -> dmu_objset_type_t (int32)
3458 * (optional) "props" -> { prop -> value }
3459 * (optional) "hidden_args" -> { "wkeydata" -> value }
3460 * raw uint8_t array of encryption wrapping key data (32 bytes)
3461 * }
3462 *
3463 * outnvl: propname -> error code (int32)
3464 */
3465
3466 static const zfs_ioc_key_t zfs_keys_create[] = {
3467 {"type", DATA_TYPE_INT32, 0},
3468 {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
3469 {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
3470 };
3471
3472 static int
3473 zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
3474 {
3475 int error = 0;
3476 zfs_creat_t zct = { 0 };
3477 nvlist_t *nvprops = NULL;
3478 nvlist_t *hidden_args = NULL;
3479 void (*cbfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
3480 dmu_objset_type_t type;
3481 boolean_t is_insensitive = B_FALSE;
3482 dsl_crypto_params_t *dcp = NULL;
3483
3484 type = (dmu_objset_type_t)fnvlist_lookup_int32(innvl, "type");
3485 (void) nvlist_lookup_nvlist(innvl, "props", &nvprops);
3486 (void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
3487
3488 switch (type) {
3489 case DMU_OST_ZFS:
3490 cbfunc = zfs_create_cb;
3491 break;
3492
3493 case DMU_OST_ZVOL:
3494 cbfunc = zvol_create_cb;
3495 break;
3496
3497 default:
3498 cbfunc = NULL;
3499 break;
3500 }
3501 if (strchr(fsname, '@') ||
3502 strchr(fsname, '%'))
3503 return (SET_ERROR(EINVAL));
3504
3505 zct.zct_props = nvprops;
3506
3507 if (cbfunc == NULL)
3508 return (SET_ERROR(EINVAL));
3509
3510 if (type == DMU_OST_ZVOL) {
3511 uint64_t volsize, volblocksize;
3512
3513 if (nvprops == NULL)
3514 return (SET_ERROR(EINVAL));
3515 if (nvlist_lookup_uint64(nvprops,
3516 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) != 0)
3517 return (SET_ERROR(EINVAL));
3518
3519 if ((error = nvlist_lookup_uint64(nvprops,
3520 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
3521 &volblocksize)) != 0 && error != ENOENT)
3522 return (SET_ERROR(EINVAL));
3523
3524 if (error != 0)
3525 volblocksize = zfs_prop_default_numeric(
3526 ZFS_PROP_VOLBLOCKSIZE);
3527
3528 if ((error = zvol_check_volblocksize(fsname,
3529 volblocksize)) != 0 ||
3530 (error = zvol_check_volsize(volsize,
3531 volblocksize)) != 0)
3532 return (error);
3533 } else if (type == DMU_OST_ZFS) {
3534 int error;
3535
3536 /*
3537 * We have to have normalization and
3538 * case-folding flags correct when we do the
3539 * file system creation, so go figure them out
3540 * now.
3541 */
3542 VERIFY(nvlist_alloc(&zct.zct_zplprops,
3543 NV_UNIQUE_NAME, KM_SLEEP) == 0);
3544 error = zfs_fill_zplprops(fsname, nvprops,
3545 zct.zct_zplprops, &is_insensitive);
3546 if (error != 0) {
3547 nvlist_free(zct.zct_zplprops);
3548 return (error);
3549 }
3550 }
3551
3552 error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, nvprops,
3553 hidden_args, &dcp);
3554 if (error != 0) {
3555 nvlist_free(zct.zct_zplprops);
3556 return (error);
3557 }
3558
3559 error = dmu_objset_create(fsname, type,
3560 is_insensitive ? DS_FLAG_CI_DATASET : 0, dcp, cbfunc, &zct);
3561
3562 nvlist_free(zct.zct_zplprops);
3563 dsl_crypto_params_free(dcp, !!error);
3564
3565 /*
3566 * It would be nice to do this atomically.
3567 */
3568 if (error == 0) {
3569 error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL,
3570 nvprops, outnvl);
3571 if (error != 0) {
3572 spa_t *spa;
3573 int error2;
3574
3575 /*
3576 * Volumes will return EBUSY and cannot be destroyed
3577 * until all asynchronous minor handling (e.g. from
3578 * setting the volmode property) has completed. Wait for
3579 * the spa_zvol_taskq to drain then retry.
3580 */
3581 error2 = dsl_destroy_head(fsname);
3582 while ((error2 == EBUSY) && (type == DMU_OST_ZVOL)) {
3583 error2 = spa_open(fsname, &spa, FTAG);
3584 if (error2 == 0) {
3585 taskq_wait(spa->spa_zvol_taskq);
3586 spa_close(spa, FTAG);
3587 }
3588 error2 = dsl_destroy_head(fsname);
3589 }
3590 }
3591 }
3592 return (error);
3593 }
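
/*
 * Illustrative userland sketch (not compiled here): creating a filesystem
 * through libzfs_core, which drives this ioctl.  The five-argument
 * lzc_create() form (with wrapping-key arguments) is assumed; older
 * libzfs_core versions take fewer parameters.  The dataset name and the
 * recordsize property are placeholders.
 *
 *	#include <libzfs_core.h>
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props, "recordsize", 128 * 1024);
 *	int err = lzc_create("tank/newfs", LZC_DATASET_TYPE_ZFS, props,
 *	    NULL, 0);
 *	// err is 0 on success, otherwise an errno-style code
 *	fnvlist_free(props);
 */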
3594
3595 /*
3596 * innvl: {
3597 * "origin" -> name of origin snapshot
3598 * (optional) "props" -> { prop -> value }
3599 * (optional) "hidden_args" -> { "wkeydata" -> value }
3600 * raw uint8_t array of encryption wrapping key data (32 bytes)
3601 * }
3602 *
3603 * outputs:
3604 * outnvl: propname -> error code (int32)
3605 */
3606 static const zfs_ioc_key_t zfs_keys_clone[] = {
3607 {"origin", DATA_TYPE_STRING, 0},
3608 {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
3609 {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
3610 };
3611
3612 static int
3613 zfs_ioc_clone(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
3614 {
3615 int error = 0;
3616 nvlist_t *nvprops = NULL;
3617 const char *origin_name;
3618
3619 origin_name = fnvlist_lookup_string(innvl, "origin");
3620 (void) nvlist_lookup_nvlist(innvl, "props", &nvprops);
3621
3622 if (strchr(fsname, '@') ||
3623 strchr(fsname, '%'))
3624 return (SET_ERROR(EINVAL));
3625
3626 if (dataset_namecheck(origin_name, NULL, NULL) != 0)
3627 return (SET_ERROR(EINVAL));
3628
3629 error = dmu_objset_clone(fsname, origin_name);
3630
3631 /*
3632 * It would be nice to do this atomically.
3633 */
3634 if (error == 0) {
3635 error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL,
3636 nvprops, outnvl);
3637 if (error != 0)
3638 (void) dsl_destroy_head(fsname);
3639 }
3640 return (error);
3641 }
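
/*
 * Illustrative userland sketch (not compiled here): creating a clone through
 * libzfs_core, which drives this ioctl.  The dataset and snapshot names are
 * placeholders; a NULL props argument means no local properties are set on
 * the new clone.
 *
 *	#include <libzfs_core.h>
 *
 *	int err = lzc_clone("tank/clone", "tank/fs@snap1", NULL);
 *	// err is 0 on success, otherwise an errno-style code
 */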
3642
3643 static const zfs_ioc_key_t zfs_keys_remap[] = {
3644 /* no nvl keys */
3645 };
3646
3647 static int
3648 zfs_ioc_remap(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
3649 {
3650 /* This IOCTL is no longer supported. */
3651 (void) fsname, (void) innvl, (void) outnvl;
3652 return (0);
3653 }
3654
3655 /*
3656 * innvl: {
3657 * "snaps" -> { snapshot1, snapshot2 }
3658 * (optional) "props" -> { prop -> value (string) }
3659 * }
3660 *
3661 * outnvl: snapshot -> error code (int32)
3662 */
3663 static const zfs_ioc_key_t zfs_keys_snapshot[] = {
3664 {"snaps", DATA_TYPE_NVLIST, 0},
3665 {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
3666 };
3667
3668 static int
3669 zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
3670 {
3671 nvlist_t *snaps;
3672 nvlist_t *props = NULL;
3673 int error, poollen;
3674 nvpair_t *pair;
3675
3676 (void) nvlist_lookup_nvlist(innvl, "props", &props);
3677 if (!nvlist_empty(props) &&
3678 zfs_earlier_version(poolname, SPA_VERSION_SNAP_PROPS))
3679 return (SET_ERROR(ENOTSUP));
3680 if ((error = zfs_check_userprops(props)) != 0)
3681 return (error);
3682
3683 snaps = fnvlist_lookup_nvlist(innvl, "snaps");
3684 poollen = strlen(poolname);
3685 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
3686 pair = nvlist_next_nvpair(snaps, pair)) {
3687 const char *name = nvpair_name(pair);
3688 char *cp = strchr(name, '@');
3689
3690 /*
3691 * The snap name must contain an @, and the part after it must
3692 * contain only valid characters.
3693 */
3694 if (cp == NULL ||
3695 zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
3696 return (SET_ERROR(EINVAL));
3697
3698 /*
3699 * The snap must be in the specified pool.
3700 */
3701 if (strncmp(name, poolname, poollen) != 0 ||
3702 (name[poollen] != '/' && name[poollen] != '@'))
3703 return (SET_ERROR(EXDEV));
3704
3705 /*
3706 * Check for permission to set the properties on the fs.
3707 */
3708 if (!nvlist_empty(props)) {
3709 *cp = '\0';
3710 error = zfs_secpolicy_write_perms(name,
3711 ZFS_DELEG_PERM_USERPROP, CRED());
3712 *cp = '@';
3713 if (error != 0)
3714 return (error);
3715 }
3716
3717 /* This must be the only snap of this fs. */
3718 for (nvpair_t *pair2 = nvlist_next_nvpair(snaps, pair);
3719 pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) {
3720 if (strncmp(name, nvpair_name(pair2), cp - name + 1)
3721 == 0) {
3722 return (SET_ERROR(EXDEV));
3723 }
3724 }
3725 }
3726
3727 error = dsl_dataset_snapshot(snaps, props, outnvl);
3728
3729 return (error);
3730 }
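
/*
 * Illustrative userland sketch (not compiled here): atomically snapshotting
 * two filesystems in the same pool through libzfs_core, which feeds this
 * ioctl.  The dataset names and the user property are placeholders.
 *
 *	#include <libzfs_core.h>
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(snaps, "tank/fs1@backup");
 *	fnvlist_add_boolean(snaps, "tank/fs2@backup");
 *	fnvlist_add_string(props, "com.example:reason", "nightly");
 *	int err = lzc_snapshot(snaps, props, &errlist);
 *	// on failure, errlist maps each offending snapshot to an errno
 *	fnvlist_free(props);
 *	fnvlist_free(snaps);
 */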
3731
3732 /*
3733 * innvl: "message" -> string
3734 */
3735 static const zfs_ioc_key_t zfs_keys_log_history[] = {
3736 {"message", DATA_TYPE_STRING, 0},
3737 };
3738
3739 static int
3740 zfs_ioc_log_history(const char *unused, nvlist_t *innvl, nvlist_t *outnvl)
3741 {
3742 (void) unused, (void) outnvl;
3743 const char *message;
3744 char *poolname;
3745 spa_t *spa;
3746 int error;
3747
3748 /*
3749 * The poolname in the ioctl is not set; we get it from the TSD,
3750 * which was set at the end of the last successful ioctl that allows
3751 * logging. The secpolicy func already checked that it is set.
3752 * Only one log ioctl is allowed after each successful ioctl, so
3753 * we clear the TSD here.
3754 */
3755 poolname = tsd_get(zfs_allow_log_key);
3756 if (poolname == NULL)
3757 return (SET_ERROR(EINVAL));
3758 (void) tsd_set(zfs_allow_log_key, NULL);
3759 error = spa_open(poolname, &spa, FTAG);
3760 kmem_strfree(poolname);
3761 if (error != 0)
3762 return (error);
3763
3764 message = fnvlist_lookup_string(innvl, "message");
3765
3766 if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) {
3767 spa_close(spa, FTAG);
3768 return (SET_ERROR(ENOTSUP));
3769 }
3770
3771 error = spa_history_log(spa, message);
3772 spa_close(spa, FTAG);
3773 return (error);
3774 }
3775
3776 /*
3777 * This ioctl is used to set the bootenv configuration on the current
3778 * pool. This configuration is stored in the second padding area of the label,
3779 * and it is used by the bootloader(s) to store the bootloader and/or system
3780 * and it is used by the bootloader(s) to store bootloader- and/or
3781 * system-specific data.
3782 * The data is stored as an nvlist data stream and is protected by
3783 * The version can have two possible values:
3784 * VB_RAW: nvlist should have key GRUB_ENVMAP, value DATA_TYPE_STRING.
3785 * VB_NVLIST: nvlist with arbitrary <key, value> pairs.
3786 */
3787 static const zfs_ioc_key_t zfs_keys_set_bootenv[] = {
3788 {"version", DATA_TYPE_UINT64, 0},
3789 {"<keys>", DATA_TYPE_ANY, ZK_OPTIONAL | ZK_WILDCARDLIST},
3790 };
3791
3792 static int
3793 zfs_ioc_set_bootenv(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
3794 {
3795 int error;
3796 spa_t *spa;
3797
3798 if ((error = spa_open(name, &spa, FTAG)) != 0)
3799 return (error);
3800 spa_vdev_state_enter(spa, SCL_ALL);
3801 error = vdev_label_write_bootenv(spa->spa_root_vdev, innvl);
3802 (void) spa_vdev_state_exit(spa, NULL, 0);
3803 spa_close(spa, FTAG);
3804 return (error);
3805 }
3806
3807 static const zfs_ioc_key_t zfs_keys_get_bootenv[] = {
3808 /* no nvl keys */
3809 };
3810
3811 static int
3812 zfs_ioc_get_bootenv(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
3813 {
3814 spa_t *spa;
3815 int error;
3816
3817 if ((error = spa_open(name, &spa, FTAG)) != 0)
3818 return (error);
3819 spa_vdev_state_enter(spa, SCL_ALL);
3820 error = vdev_label_read_bootenv(spa->spa_root_vdev, outnvl);
3821 (void) spa_vdev_state_exit(spa, NULL, 0);
3822 spa_close(spa, FTAG);
3823 return (error);
3824 }
3825
3826 /*
3827 * The dp_config_rwlock must not be held when calling this, because the
3828 * unmount may need to write out data.
3829 *
3830 * This function is best-effort. Callers must deal gracefully if it
3831 * remains mounted (or is remounted after this call).
3832 *
3833 * It returns no status: the argument may not be a snapshot, may not
3834 * currently be mounted as a filesystem, or the unmount may fail.
3835 */
3836 void
3837 zfs_unmount_snap(const char *snapname)
3838 {
3839 if (strchr(snapname, '@') == NULL)
3840 return;
3841
3842 (void) zfsctl_snapshot_unmount(snapname, MNT_FORCE);
3843 }
3844
3845 static int
3846 zfs_unmount_snap_cb(const char *snapname, void *arg)
3847 {
3848 (void) arg;
3849 zfs_unmount_snap(snapname);
3850 return (0);
3851 }
3852
3853 /*
3854 * When a clone is destroyed, its origin may also need to be destroyed,
3855 * in which case it must be unmounted. This routine will do that unmount
3856 * if necessary.
3857 */
3858 void
3859 zfs_destroy_unmount_origin(const char *fsname)
3860 {
3861 int error;
3862 objset_t *os;
3863 dsl_dataset_t *ds;
3864
3865 error = dmu_objset_hold(fsname, FTAG, &os);
3866 if (error != 0)
3867 return;
3868 ds = dmu_objset_ds(os);
3869 if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev)) {
3870 char originname[ZFS_MAX_DATASET_NAME_LEN];
3871 dsl_dataset_name(ds->ds_prev, originname);
3872 dmu_objset_rele(os, FTAG);
3873 zfs_unmount_snap(originname);
3874 } else {
3875 dmu_objset_rele(os, FTAG);
3876 }
3877 }
3878
3879 /*
3880 * innvl: {
3881 * "snaps" -> { snapshot1, snapshot2 }
3882 * (optional boolean) "defer"
3883 * }
3884 *
3885 * outnvl: snapshot -> error code (int32)
3886 */
3887 static const zfs_ioc_key_t zfs_keys_destroy_snaps[] = {
3888 {"snaps", DATA_TYPE_NVLIST, 0},
3889 {"defer", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
3890 };
3891
3892 static int
3893 zfs_ioc_destroy_snaps(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
3894 {
3895 int poollen;
3896 nvlist_t *snaps;
3897 nvpair_t *pair;
3898 boolean_t defer;
3899 spa_t *spa;
3900
3901 snaps = fnvlist_lookup_nvlist(innvl, "snaps");
3902 defer = nvlist_exists(innvl, "defer");
3903
3904 poollen = strlen(poolname);
3905 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
3906 pair = nvlist_next_nvpair(snaps, pair)) {
3907 const char *name = nvpair_name(pair);
3908
3909 /*
3910 * The snap must be in the specified pool to prevent the
3911 * invalid removal of zvol minors below.
3912 */
3913 if (strncmp(name, poolname, poollen) != 0 ||
3914 (name[poollen] != '/' && name[poollen] != '@'))
3915 return (SET_ERROR(EXDEV));
3916
3917 zfs_unmount_snap(nvpair_name(pair));
3918 if (spa_open(name, &spa, FTAG) == 0) {
3919 zvol_remove_minors(spa, name, B_TRUE);
3920 spa_close(spa, FTAG);
3921 }
3922 }
3923
3924 return (dsl_destroy_snapshots_nvl(snaps, defer, outnvl));
3925 }
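/*
 * Illustrative userland sketch (not part of this file): constructing the
 * "snaps" nvlist described above and destroying the snapshots through
 * libzfs_core. Assumes lzc_destroy_snaps() with its usual
 * (snaps, defer, errlist) signature; the pool and snapshot names are
 * made up.
 *
 *     #include <libzfs_core.h>
 *     #include <libnvpair.h>
 *
 *     static int
 *     example_destroy_snaps(void)
 *     {
 *             nvlist_t *snaps = fnvlist_alloc();
 *             nvlist_t *errlist = NULL;
 *             fnvlist_add_boolean(snaps, "tank/fs@snap1");
 *             fnvlist_add_boolean(snaps, "tank/fs@snap2");
 *             int err = lzc_destroy_snaps(snaps, B_FALSE, &errlist);
 *             fnvlist_free(snaps);
 *             if (errlist != NULL)
 *                     fnvlist_free(errlist);
 *             return (err);
 *     }
 */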
3926
3927 /*
3928 * Create bookmarks. The bookmark names are of the form <fs>#<bmark>.
3929 * All bookmarks and snapshots must be in the same pool.
3930 * dsl_bookmark_create_nvl_validate describes the nvlist schema in more detail.
3931 *
3932 * innvl: {
3933 * new_bookmark1 -> existing_snapshot,
3934 * new_bookmark2 -> existing_bookmark,
3935 * }
3936 *
3937 * outnvl: bookmark -> error code (int32)
3938 *
3939 */
3940 static const zfs_ioc_key_t zfs_keys_bookmark[] = {
3941 {"<bookmark>...", DATA_TYPE_STRING, ZK_WILDCARDLIST},
3942 };
3943
3944 static int
3945 zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
3946 {
3947 (void) poolname;
3948 return (dsl_bookmark_create(innvl, outnvl));
3949 }
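/*
 * Illustrative userland sketch (not part of this file): creating a bookmark
 * from an existing snapshot using the nvlist schema described above.
 * Assumes libzfs_core's lzc_bookmark(bookmarks, &errlist); the names are
 * made up.
 *
 *     nvlist_t *bmarks = fnvlist_alloc();
 *     nvlist_t *errlist = NULL;
 *     fnvlist_add_string(bmarks, "tank/fs#mark1", "tank/fs@snap1");
 *     int err = lzc_bookmark(bmarks, &errlist);
 *     fnvlist_free(bmarks);
 */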
3950
3951 /*
3952 * innvl: {
3953 * property 1, property 2, ...
3954 * }
3955 *
3956 * outnvl: {
3957 * bookmark name 1 -> { property 1, property 2, ... },
3958 * bookmark name 2 -> { property 1, property 2, ... }
3959 * }
3960 *
3961 */
3962 static const zfs_ioc_key_t zfs_keys_get_bookmarks[] = {
3963 {"<property>...", DATA_TYPE_BOOLEAN, ZK_WILDCARDLIST | ZK_OPTIONAL},
3964 };
3965
3966 static int
3967 zfs_ioc_get_bookmarks(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
3968 {
3969 return (dsl_get_bookmarks(fsname, innvl, outnvl));
3970 }
3971
3972 /*
3973 * innvl is not used.
3974 *
3975 * outnvl: {
3976 * property 1, property 2, ...
3977 * }
3978 *
3979 */
3980 static const zfs_ioc_key_t zfs_keys_get_bookmark_props[] = {
3981 /* no nvl keys */
3982 };
3983
3984 static int
3985 zfs_ioc_get_bookmark_props(const char *bookmark, nvlist_t *innvl,
3986 nvlist_t *outnvl)
3987 {
3988 (void) innvl;
3989 char fsname[ZFS_MAX_DATASET_NAME_LEN];
3990 char *bmname;
3991
3992 bmname = strchr(bookmark, '#');
3993 if (bmname == NULL)
3994 return (SET_ERROR(EINVAL));
3995 bmname++;
3996
3997 (void) strlcpy(fsname, bookmark, sizeof (fsname));
3998 *(strchr(fsname, '#')) = '\0';
3999
4000 return (dsl_get_bookmark_props(fsname, bmname, outnvl));
4001 }
4002
4003 /*
4004 * innvl: {
4005 * bookmark name 1, bookmark name 2
4006 * }
4007 *
4008 * outnvl: bookmark -> error code (int32)
4009 *
4010 */
4011 static const zfs_ioc_key_t zfs_keys_destroy_bookmarks[] = {
4012 {"<bookmark>...", DATA_TYPE_BOOLEAN, ZK_WILDCARDLIST},
4013 };
4014
4015 static int
4016 zfs_ioc_destroy_bookmarks(const char *poolname, nvlist_t *innvl,
4017 nvlist_t *outnvl)
4018 {
4019 int error, poollen;
4020
4021 poollen = strlen(poolname);
4022 for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
4023 pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
4024 const char *name = nvpair_name(pair);
4025 const char *cp = strchr(name, '#');
4026
4027 /*
4028 * The bookmark name must contain a '#', and the part after it
4029 * must contain only valid characters.
4030 */
4031 if (cp == NULL ||
4032 zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
4033 return (SET_ERROR(EINVAL));
4034
4035 /*
4036 * The bookmark must be in the specified pool.
4037 */
4038 if (strncmp(name, poolname, poollen) != 0 ||
4039 (name[poollen] != '/' && name[poollen] != '#'))
4040 return (SET_ERROR(EXDEV));
4041 }
4042
4043 error = dsl_bookmark_destroy(innvl, outnvl);
4044 return (error);
4045 }
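/*
 * Illustrative userland sketch (not part of this file): the innvl for
 * bookmark destruction is a list of full bookmark names, all in one pool,
 * added as boolean keys. Assumes libzfs_core's lzc_destroy_bookmarks();
 * the names are made up.
 *
 *     nvlist_t *bmarks = fnvlist_alloc();
 *     nvlist_t *errlist = NULL;
 *     fnvlist_add_boolean(bmarks, "tank/fs#mark1");
 *     fnvlist_add_boolean(bmarks, "tank/fs#mark2");
 *     int err = lzc_destroy_bookmarks(bmarks, &errlist);
 *     fnvlist_free(bmarks);
 */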
4046
4047 static const zfs_ioc_key_t zfs_keys_channel_program[] = {
4048 {"program", DATA_TYPE_STRING, 0},
4049 {"arg", DATA_TYPE_ANY, 0},
4050 {"sync", DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
4051 {"instrlimit", DATA_TYPE_UINT64, ZK_OPTIONAL},
4052 {"memlimit", DATA_TYPE_UINT64, ZK_OPTIONAL},
4053 };
4054
4055 static int
4056 zfs_ioc_channel_program(const char *poolname, nvlist_t *innvl,
4057 nvlist_t *outnvl)
4058 {
4059 const char *program;
4060 uint64_t instrlimit, memlimit;
4061 boolean_t sync_flag;
4062 nvpair_t *nvarg = NULL;
4063
4064 program = fnvlist_lookup_string(innvl, ZCP_ARG_PROGRAM);
4065 if (0 != nvlist_lookup_boolean_value(innvl, ZCP_ARG_SYNC, &sync_flag)) {
4066 sync_flag = B_TRUE;
4067 }
4068 if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_INSTRLIMIT, &instrlimit)) {
4069 instrlimit = ZCP_DEFAULT_INSTRLIMIT;
4070 }
4071 if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_MEMLIMIT, &memlimit)) {
4072 memlimit = ZCP_DEFAULT_MEMLIMIT;
4073 }
4074 nvarg = fnvlist_lookup_nvpair(innvl, ZCP_ARG_ARGLIST);
4075
4076 if (instrlimit == 0 || instrlimit > zfs_lua_max_instrlimit)
4077 return (SET_ERROR(EINVAL));
4078 if (memlimit == 0 || memlimit > zfs_lua_max_memlimit)
4079 return (SET_ERROR(EINVAL));
4080
4081 return (zcp_eval(poolname, program, sync_flag, instrlimit, memlimit,
4082 nvarg, outnvl));
4083 }
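/*
 * Illustrative userland sketch (not part of this file): running a trivial
 * channel program with explicit instruction and memory limits (the limit
 * values below are example numbers, not required defaults). Assumes
 * libzfs_core's lzc_channel_program(pool, program, instrlimit, memlimit,
 * args, &outnvl); the pool name and program text are made up.
 *
 *     #include <libzfs_core.h>
 *     #include <libnvpair.h>
 *
 *     static int
 *     example_channel_program(void)
 *     {
 *             const char *prog = "return {}";
 *             nvlist_t *args = fnvlist_alloc();
 *             nvlist_t *outnvl = NULL;
 *             int err = lzc_channel_program("tank", prog,
 *                 10 * 1000 * 1000, 10 * 1024 * 1024, args, &outnvl);
 *             fnvlist_free(args);
 *             if (outnvl != NULL)
 *                     fnvlist_free(outnvl);
 *             return (err);
 *     }
 */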
4084
4085 /*
4086 * innvl: unused
4087 * outnvl: empty
4088 */
4089 static const zfs_ioc_key_t zfs_keys_pool_checkpoint[] = {
4090 /* no nvl keys */
4091 };
4092
4093 static int
4094 zfs_ioc_pool_checkpoint(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
4095 {
4096 (void) innvl, (void) outnvl;
4097 return (spa_checkpoint(poolname));
4098 }
4099
4100 /*
4101 * innvl: unused
4102 * outnvl: empty
4103 */
4104 static const zfs_ioc_key_t zfs_keys_pool_discard_checkpoint[] = {
4105 /* no nvl keys */
4106 };
4107
4108 static int
4109 zfs_ioc_pool_discard_checkpoint(const char *poolname, nvlist_t *innvl,
4110 nvlist_t *outnvl)
4111 {
4112 (void) innvl, (void) outnvl;
4113 return (spa_checkpoint_discard(poolname));
4114 }
4115
4116 /*
4117 * Loads specific types of data for the given pool
4118 *
4119 * innvl: {
4120 * "prefetch_type" -> int32_t
4121 * }
4122 *
4123 * outnvl: empty
4124 */
4125 static const zfs_ioc_key_t zfs_keys_pool_prefetch[] = {
4126 {ZPOOL_PREFETCH_TYPE, DATA_TYPE_INT32, 0},
4127 };
4128
4129 static int
4130 zfs_ioc_pool_prefetch(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
4131 {
4132 (void) outnvl;
4133
4134 int error;
4135 spa_t *spa;
4136 int32_t type;
4137
4138 /*
4139 * Currently, only ZPOOL_PREFETCH_DDT is supported
4140 */
4141 if (nvlist_lookup_int32(innvl, ZPOOL_PREFETCH_TYPE, &type) != 0 ||
4142 type != ZPOOL_PREFETCH_DDT) {
4143 return (EINVAL);
4144 }
4145
4146 error = spa_open(poolname, &spa, FTAG);
4147 if (error != 0)
4148 return (error);
4149
4150 hrtime_t start_time = gethrtime();
4151
4152 ddt_prefetch_all(spa);
4153
4154 zfs_dbgmsg("pool '%s': loaded ddt into ARC in %llu ms", spa->spa_name,
4155 (u_longlong_t)NSEC2MSEC(gethrtime() - start_time));
4156
4157 spa_close(spa, FTAG);
4158
4159 return (error);
4160 }
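/*
 * Illustrative sketch (not part of this file) of the innvl a caller builds
 * for this ioctl, matching the key table above; ZPOOL_PREFETCH_DDT is the
 * only type accepted today. How the nvlist reaches the kernel (typically a
 * libzfs_core wrapper around lzc_ioctl()) is left out of the sketch.
 *
 *     nvlist_t *innvl = fnvlist_alloc();
 *     fnvlist_add_int32(innvl, ZPOOL_PREFETCH_TYPE, ZPOOL_PREFETCH_DDT);
 *     (pass innvl to ZFS_IOC_POOL_PREFETCH)
 *     fnvlist_free(innvl);
 */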
4161
4162 /*
4163 * inputs:
4164 * zc_name name of dataset to destroy
4165 * zc_defer_destroy mark for deferred destroy
4166 *
4167 * outputs: none
4168 */
4169 static int
4170 zfs_ioc_destroy(zfs_cmd_t *zc)
4171 {
4172 objset_t *os;
4173 dmu_objset_type_t ost;
4174 int err;
4175
4176 err = dmu_objset_hold(zc->zc_name, FTAG, &os);
4177 if (err != 0)
4178 return (err);
4179 ost = dmu_objset_type(os);
4180 dmu_objset_rele(os, FTAG);
4181
4182 if (ost == DMU_OST_ZFS)
4183 zfs_unmount_snap(zc->zc_name);
4184
4185 if (strchr(zc->zc_name, '@')) {
4186 err = dsl_destroy_snapshot(zc->zc_name, zc->zc_defer_destroy);
4187 } else {
4188 err = dsl_destroy_head(zc->zc_name);
4189 if (err == EEXIST) {
4190 /*
4191 * The given dataset may have hidden child
4192 * (%recv) datasets - "leftovers" from a
4193 * previously interrupted
4194 * 'zfs receive'.
4195 *
4196 * 6 extra bytes for /%recv
4197 */
4198 char namebuf[ZFS_MAX_DATASET_NAME_LEN + 6];
4199
4200 if (snprintf(namebuf, sizeof (namebuf), "%s/%s",
4201 zc->zc_name, recv_clone_name) >=
4202 sizeof (namebuf))
4203 return (SET_ERROR(EINVAL));
4204
4205 /*
4206 * Try to remove the hidden child (%recv) first,
4207 * then try to remove the target dataset again.
4208 * If the hidden child (%recv) does not exist,
4209 * the original error (EEXIST) is returned.
4210 */
4211 err = dsl_destroy_head(namebuf);
4212 if (err == 0)
4213 err = dsl_destroy_head(zc->zc_name);
4214 else if (err == ENOENT)
4215 err = SET_ERROR(EEXIST);
4216 }
4217 }
4218
4219 return (err);
4220 }
4221
4222 /*
4223 * innvl: {
4224 * "initialize_command" -> POOL_INITIALIZE_{CANCEL|START|SUSPEND} (uint64)
4225 * "initialize_vdevs": { -> guids to initialize (nvlist)
4226 * "vdev_path_1": vdev_guid_1, (uint64),
4227 * "vdev_path_2": vdev_guid_2, (uint64),
4228 * ...
4229 * },
4230 * }
4231 *
4232 * outnvl: {
4233 * "initialize_vdevs": { -> initialization errors (nvlist)
4234 * "vdev_path_1": errno, see function body for possible errnos (uint64)
4235 * "vdev_path_2": errno, ... (uint64)
4236 * ...
4237 * }
4238 * }
4239 *
4240 * EINVAL is returned for an unknown command or if any of the provided vdev
4241 * guids have been specified with a type other than uint64.
4242 */
4243 static const zfs_ioc_key_t zfs_keys_pool_initialize[] = {
4244 {ZPOOL_INITIALIZE_COMMAND, DATA_TYPE_UINT64, 0},
4245 {ZPOOL_INITIALIZE_VDEVS, DATA_TYPE_NVLIST, 0}
4246 };
4247
4248 static int
4249 zfs_ioc_pool_initialize(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
4250 {
4251 uint64_t cmd_type;
4252 if (nvlist_lookup_uint64(innvl, ZPOOL_INITIALIZE_COMMAND,
4253 &cmd_type) != 0) {
4254 return (SET_ERROR(EINVAL));
4255 }
4256
4257 if (!(cmd_type == POOL_INITIALIZE_CANCEL ||
4258 cmd_type == POOL_INITIALIZE_START ||
4259 cmd_type == POOL_INITIALIZE_SUSPEND ||
4260 cmd_type == POOL_INITIALIZE_UNINIT)) {
4261 return (SET_ERROR(EINVAL));
4262 }
4263
4264 nvlist_t *vdev_guids;
4265 if (nvlist_lookup_nvlist(innvl, ZPOOL_INITIALIZE_VDEVS,
4266 &vdev_guids) != 0) {
4267 return (SET_ERROR(EINVAL));
4268 }
4269
4270 for (nvpair_t *pair = nvlist_next_nvpair(vdev_guids, NULL);
4271 pair != NULL; pair = nvlist_next_nvpair(vdev_guids, pair)) {
4272 uint64_t vdev_guid;
4273 if (nvpair_value_uint64(pair, &vdev_guid) != 0) {
4274 return (SET_ERROR(EINVAL));
4275 }
4276 }
4277
4278 spa_t *spa;
4279 int error = spa_open(poolname, &spa, FTAG);
4280 if (error != 0)
4281 return (error);
4282
4283 nvlist_t *vdev_errlist = fnvlist_alloc();
4284 int total_errors = spa_vdev_initialize(spa, vdev_guids, cmd_type,
4285 vdev_errlist);
4286
4287 if (fnvlist_size(vdev_errlist) > 0) {
4288 fnvlist_add_nvlist(outnvl, ZPOOL_INITIALIZE_VDEVS,
4289 vdev_errlist);
4290 }
4291 fnvlist_free(vdev_errlist);
4292
4293 spa_close(spa, FTAG);
4294 return (total_errors > 0 ? SET_ERROR(EINVAL) : 0);
4295 }
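/*
 * Illustrative userland sketch (not part of this file): building the vdev
 * list described above to start initialization on two devices. The guid
 * values are made up; a real caller reads them from the pool config.
 * Assumes libzfs_core's lzc_initialize(pool, cmd, vdevs, &errlist).
 *
 *     nvlist_t *vdevs = fnvlist_alloc();
 *     nvlist_t *errlist = NULL;
 *     fnvlist_add_uint64(vdevs, "/dev/sda", 0x1234abcd5678ef01ULL);
 *     fnvlist_add_uint64(vdevs, "/dev/sdb", 0x9876fedc5432ba10ULL);
 *     int err = lzc_initialize("tank", POOL_INITIALIZE_START, vdevs,
 *         &errlist);
 *     fnvlist_free(vdevs);
 */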
4296
4297 /*
4298 * innvl: {
4299 * "trim_command" -> POOL_TRIM_{CANCEL|START|SUSPEND} (uint64)
4300 * "trim_vdevs": { -> guids to TRIM (nvlist)
4301 * "vdev_path_1": vdev_guid_1, (uint64),
4302 * "vdev_path_2": vdev_guid_2, (uint64),
4303 * ...
4304 * },
4305 * "trim_rate" -> Target TRIM rate in bytes/sec.
4306 * "trim_secure" -> Set to request a secure TRIM.
4307 * }
4308 *
4309 * outnvl: {
4310 * "trim_vdevs": { -> TRIM errors (nvlist)
4311 * "vdev_path_1": errno, see function body for possible errnos (uint64)
4312 * "vdev_path_2": errno, ... (uint64)
4313 * ...
4314 * }
4315 * }
4316 *
4317 * EINVAL is returned for an unknown command or if any of the provided vdev
4318 * guids have been specified with a type other than uint64.
4319 */
4320 static const zfs_ioc_key_t zfs_keys_pool_trim[] = {
4321 {ZPOOL_TRIM_COMMAND, DATA_TYPE_UINT64, 0},
4322 {ZPOOL_TRIM_VDEVS, DATA_TYPE_NVLIST, 0},
4323 {ZPOOL_TRIM_RATE, DATA_TYPE_UINT64, ZK_OPTIONAL},
4324 {ZPOOL_TRIM_SECURE, DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
4325 };
4326
4327 static int
4328 zfs_ioc_pool_trim(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
4329 {
4330 uint64_t cmd_type;
4331 if (nvlist_lookup_uint64(innvl, ZPOOL_TRIM_COMMAND, &cmd_type) != 0)
4332 return (SET_ERROR(EINVAL));
4333
4334 if (!(cmd_type == POOL_TRIM_CANCEL ||
4335 cmd_type == POOL_TRIM_START ||
4336 cmd_type == POOL_TRIM_SUSPEND)) {
4337 return (SET_ERROR(EINVAL));
4338 }
4339
4340 nvlist_t *vdev_guids;
4341 if (nvlist_lookup_nvlist(innvl, ZPOOL_TRIM_VDEVS, &vdev_guids) != 0)
4342 return (SET_ERROR(EINVAL));
4343
4344 for (nvpair_t *pair = nvlist_next_nvpair(vdev_guids, NULL);
4345 pair != NULL; pair = nvlist_next_nvpair(vdev_guids, pair)) {
4346 uint64_t vdev_guid;
4347 if (nvpair_value_uint64(pair, &vdev_guid) != 0) {
4348 return (SET_ERROR(EINVAL));
4349 }
4350 }
4351
4352 /* Optional, defaults to maximum rate when not provided */
4353 uint64_t rate;
4354 if (nvlist_lookup_uint64(innvl, ZPOOL_TRIM_RATE, &rate) != 0)
4355 rate = 0;
4356
4357 /* Optional, defaults to standard TRIM when not provided */
4358 boolean_t secure;
4359 if (nvlist_lookup_boolean_value(innvl, ZPOOL_TRIM_SECURE,
4360 &secure) != 0) {
4361 secure = B_FALSE;
4362 }
4363
4364 spa_t *spa;
4365 int error = spa_open(poolname, &spa, FTAG);
4366 if (error != 0)
4367 return (error);
4368
4369 nvlist_t *vdev_errlist = fnvlist_alloc();
4370 int total_errors = spa_vdev_trim(spa, vdev_guids, cmd_type,
4371 rate, !!zfs_trim_metaslab_skip, secure, vdev_errlist);
4372
4373 if (fnvlist_size(vdev_errlist) > 0)
4374 fnvlist_add_nvlist(outnvl, ZPOOL_TRIM_VDEVS, vdev_errlist);
4375
4376 fnvlist_free(vdev_errlist);
4377
4378 spa_close(spa, FTAG);
4379 return (total_errors > 0 ? SET_ERROR(EINVAL) : 0);
4380 }
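/*
 * Illustrative userland sketch (not part of this file): a TRIM request
 * mirrors the initialize case above, with an optional rate limit and
 * secure-TRIM flag. Assumes libzfs_core's lzc_trim(); rate 0 means
 * "maximum rate" as noted above, and the vdev guid is made up.
 *
 *     nvlist_t *vdevs = fnvlist_alloc();
 *     nvlist_t *errlist = NULL;
 *     fnvlist_add_uint64(vdevs, "/dev/sda", 0x1234abcd5678ef01ULL);
 *     int err = lzc_trim("tank", POOL_TRIM_START, 0, B_FALSE, vdevs,
 *         &errlist);
 *     fnvlist_free(vdevs);
 */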
4381
4382 #define DDT_PRUNE_UNIT "ddt_prune_unit"
4383 #define DDT_PRUNE_AMOUNT "ddt_prune_amount"
4384
4385 /*
4386 * innvl: {
4387 * "ddt_prune_unit" -> uint32_t
4388 * "ddt_prune_amount" -> uint64_t
4389 * }
4390 *
4391 * outnvl: empty
4392 */
4393 static const zfs_ioc_key_t zfs_keys_ddt_prune[] = {
4394 {DDT_PRUNE_UNIT, DATA_TYPE_INT32, 0},
4395 {DDT_PRUNE_AMOUNT, DATA_TYPE_UINT64, 0},
4396 };
4397
4398 static int
4399 zfs_ioc_ddt_prune(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
4400 {
4401 int32_t unit;
4402 uint64_t amount;
4403
4404 if (nvlist_lookup_int32(innvl, DDT_PRUNE_UNIT, &unit) != 0 ||
4405 nvlist_lookup_uint64(innvl, DDT_PRUNE_AMOUNT, &amount) != 0) {
4406 return (EINVAL);
4407 }
4408
4409 spa_t *spa;
4410 int error = spa_open(poolname, &spa, FTAG);
4411 if (error != 0)
4412 return (error);
4413
4414 if (!spa_feature_is_enabled(spa, SPA_FEATURE_FAST_DEDUP)) {
4415 spa_close(spa, FTAG);
4416 return (SET_ERROR(ENOTSUP));
4417 }
4418
4419 error = ddt_prune_unique_entries(spa, (zpool_ddt_prune_unit_t)unit,
4420 amount);
4421
4422 spa_close(spa, FTAG);
4423
4424 return (error);
4425 }
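/*
 * Illustrative sketch (not part of this file) of the innvl this ioctl
 * expects, matching the DDT_PRUNE_* keys above. The unit/amount pairing
 * (e.g. pruning by age or by percentage of unique entries) is chosen by
 * the caller; the ZPOOL_DDT_PRUNE_PERCENTAGE value used below is an
 * assumed enum name.
 *
 *     nvlist_t *innvl = fnvlist_alloc();
 *     fnvlist_add_int32(innvl, "ddt_prune_unit", ZPOOL_DDT_PRUNE_PERCENTAGE);
 *     fnvlist_add_uint64(innvl, "ddt_prune_amount", 10);
 *     (pass innvl to ZFS_IOC_DDT_PRUNE, e.g. via a libzfs_core wrapper)
 *     fnvlist_free(innvl);
 */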
4426
4427 /*
4428 * This ioctl waits for activity of a particular type to complete. If there is
4429 * no activity of that type in progress, it returns immediately, and the
4430 * returned value "waited" is false. If there is activity in progress, and no
4431 * tag is passed in, the ioctl blocks until all activity of that type is
4432 * complete, and then returns with "waited" set to true.
4433 *
4434 * If a tag is provided, it identifies a particular instance of an activity to
4435 * wait for. Currently, this is only valid for use with 'initialize', because
4436 * that is the only activity for which there can be multiple instances running
4437 * concurrently. In the case of 'initialize', the tag corresponds to the guid of
4438 * the vdev on which to wait.
4439 *
4440 * If a thread waiting in the ioctl receives a signal, the call will return
4441 * immediately, and the return value will be EINTR.
4442 *
4443 * innvl: {
4444 * "wait_activity" -> int32_t
4445 * (optional) "wait_tag" -> uint64_t
4446 * }
4447 *
4448 * outnvl: "waited" -> boolean_t
4449 */
4450 static const zfs_ioc_key_t zfs_keys_pool_wait[] = {
4451 {ZPOOL_WAIT_ACTIVITY, DATA_TYPE_INT32, 0},
4452 {ZPOOL_WAIT_TAG, DATA_TYPE_UINT64, ZK_OPTIONAL},
4453 };
4454
4455 static int
4456 zfs_ioc_wait(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
4457 {
4458 int32_t activity;
4459 uint64_t tag;
4460 boolean_t waited;
4461 int error;
4462
4463 if (nvlist_lookup_int32(innvl, ZPOOL_WAIT_ACTIVITY, &activity) != 0)
4464 return (EINVAL);
4465
4466 if (nvlist_lookup_uint64(innvl, ZPOOL_WAIT_TAG, &tag) == 0)
4467 error = spa_wait_tag(name, activity, tag, &waited);
4468 else
4469 error = spa_wait(name, activity, &waited);
4470
4471 if (error == 0)
4472 fnvlist_add_boolean_value(outnvl, ZPOOL_WAIT_WAITED, waited);
4473
4474 return (error);
4475 }
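/*
 * Illustrative userland sketch (not part of this file): waiting for a scrub
 * to finish. Assumes libzfs_core's lzc_wait(pool, activity, &waited) and
 * the ZPOOL_WAIT_SCRUB activity value.
 *
 *     #include <stdio.h>
 *     #include <libzfs_core.h>
 *
 *     static int
 *     example_wait_for_scrub(const char *pool)
 *     {
 *             boolean_t waited;
 *             int err = lzc_wait(pool, ZPOOL_WAIT_SCRUB, &waited);
 *             if (err == 0 && waited)
 *                     printf("scrub on %s completed while waiting\n", pool);
 *             return (err);
 *     }
 */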
4476
4477 /*
4478 * This ioctl waits for activity of a particular type to complete. If there is
4479 * no activity of that type in progress, it returns immediately, and the
4480 * returned value "waited" is false. If there is activity in progress, and no
4481 * tag is passed in, the ioctl blocks until all activity of that type is
4482 * complete, and then returns with "waited" set to true.
4483 *
4484 * If a thread waiting in the ioctl receives a signal, the call will return
4485 * immediately, and the return value will be EINTR.
4486 *
4487 * innvl: {
4488 * "wait_activity" -> int32_t
4489 * }
4490 *
4491 * outnvl: "waited" -> boolean_t
4492 */
4493 static const zfs_ioc_key_t zfs_keys_fs_wait[] = {
4494 {ZFS_WAIT_ACTIVITY, DATA_TYPE_INT32, 0},
4495 };
4496
4497 static int
4498 zfs_ioc_wait_fs(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
4499 {
4500 int32_t activity;
4501 boolean_t waited = B_FALSE;
4502 int error;
4503 dsl_pool_t *dp;
4504 dsl_dir_t *dd;
4505 dsl_dataset_t *ds;
4506
4507 if (nvlist_lookup_int32(innvl, ZFS_WAIT_ACTIVITY, &activity) != 0)
4508 return (SET_ERROR(EINVAL));
4509
4510 if (activity >= ZFS_WAIT_NUM_ACTIVITIES || activity < 0)
4511 return (SET_ERROR(EINVAL));
4512
4513 if ((error = dsl_pool_hold(name, FTAG, &dp)) != 0)
4514 return (error);
4515
4516 if ((error = dsl_dataset_hold(dp, name, FTAG, &ds)) != 0) {
4517 dsl_pool_rele(dp, FTAG);
4518 return (error);
4519 }
4520
4521 dd = ds->ds_dir;
4522 mutex_enter(&dd->dd_activity_lock);
4523 dd->dd_activity_waiters++;
4524
4525 /*
4526 * We get a long-hold here so that the dsl_dataset_t and dsl_dir_t
4527 * aren't evicted while we're waiting. Normally this is prevented by
4528 * holding the pool, but we can't do that while we're waiting since
4529 * that would prevent TXGs from syncing out. Some of the functionality
4530 * of long-holds (e.g. preventing deletion) is unnecessary for this
4531 * case, since we would cancel the waiters before proceeding with a
4532 * deletion. An alternative mechanism for keeping the dataset around
4533 * could be developed but this is simpler.
4534 */
4535 dsl_dataset_long_hold(ds, FTAG);
4536 dsl_pool_rele(dp, FTAG);
4537
4538 error = dsl_dir_wait(dd, ds, activity, &waited);
4539
4540 dsl_dataset_long_rele(ds, FTAG);
4541 dd->dd_activity_waiters--;
4542 if (dd->dd_activity_waiters == 0)
4543 cv_signal(&dd->dd_activity_cv);
4544 mutex_exit(&dd->dd_activity_lock);
4545
4546 dsl_dataset_rele(ds, FTAG);
4547
4548 if (error == 0)
4549 fnvlist_add_boolean_value(outnvl, ZFS_WAIT_WAITED, waited);
4550
4551 return (error);
4552 }
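/*
 * Illustrative sketch (not part of this file): the filesystem-level variant
 * takes only the activity id. Assumes libzfs_core's lzc_wait_fs() and the
 * ZFS_WAIT_DELETEQ activity (waiting for the delete queue to drain).
 *
 *     boolean_t waited;
 *     int err = lzc_wait_fs("tank/fs", ZFS_WAIT_DELETEQ, &waited);
 */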
4553
4554 /*
4555 * fsname is the name of the dataset to roll back (to its most recent snapshot)
4556 *
4557 * innvl may contain the name of the expected target snapshot
4558 *
4559 * outnvl: "target" -> name of most recent snapshot
4560 *
4561 */
4562 static const zfs_ioc_key_t zfs_keys_rollback[] = {
4563 {"target", DATA_TYPE_STRING, ZK_OPTIONAL},
4564 };
4565
4566 static int
4567 zfs_ioc_rollback(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
4568 {
4569 zfsvfs_t *zfsvfs;
4570 zvol_state_handle_t *zv;
4571 const char *target = NULL;
4572 int error;
4573
4574 (void) nvlist_lookup_string(innvl, "target", &target);
4575 if (target != NULL) {
4576 const char *cp = strchr(target, '@');
4577
4578 /*
4579 * The snap name must contain an @, and the part after it must
4580 * contain only valid characters.
4581 */
4582 if (cp == NULL ||
4583 zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
4584 return (SET_ERROR(EINVAL));
4585 }
4586
4587 if (getzfsvfs(fsname, &zfsvfs) == 0) {
4588 dsl_dataset_t *ds;
4589
4590 ds = dmu_objset_ds(zfsvfs->z_os);
4591 error = zfs_suspend_fs(zfsvfs);
4592 if (error == 0) {
4593 int resume_err;
4594
4595 error = dsl_dataset_rollback(fsname, target, zfsvfs,
4596 outnvl);
4597 resume_err = zfs_resume_fs(zfsvfs, ds);
4598 error = error ? error : resume_err;
4599 }
4600 zfs_vfs_rele(zfsvfs);
4601 } else if ((zv = zvol_suspend(fsname)) != NULL) {
4602 error = dsl_dataset_rollback(fsname, target, zvol_tag(zv),
4603 outnvl);
4604 zvol_resume(zv);
4605 } else {
4606 error = dsl_dataset_rollback(fsname, target, NULL, outnvl);
4607 }
4608 return (error);
4609 }
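/*
 * Illustrative userland sketch (not part of this file): rolling a
 * filesystem back to a specific snapshot, which corresponds to passing
 * "target" in innvl above. Assumes libzfs_core's
 * lzc_rollback_to(fsname, snapname); the names are made up.
 *
 *     int err = lzc_rollback_to("tank/fs", "tank/fs@yesterday");
 */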
4610
4611 static int
4612 recursive_unmount(const char *fsname, void *arg)
4613 {
4614 const char *snapname = arg;
4615 char *fullname;
4616
4617 fullname = kmem_asprintf("%s@%s", fsname, snapname);
4618 zfs_unmount_snap(fullname);
4619 kmem_strfree(fullname);
4620
4621 return (0);
4622 }
4623
4624 /*
4625 *
4626 * snapname is the snapshot to redact.
4627 * innvl: {
4628 * "bookname" -> (string)
4629 * shortname of the redaction bookmark to generate
4630 * "snapnv" -> (nvlist, values ignored)
4631 * snapshots to redact snapname with respect to
4632 * }
4633 *
4634 * outnvl is unused
4635 */
4636
4637 static const zfs_ioc_key_t zfs_keys_redact[] = {
4638 {"bookname", DATA_TYPE_STRING, 0},
4639 {"snapnv", DATA_TYPE_NVLIST, 0},
4640 };
4641
4642 static int
4643 zfs_ioc_redact(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
4644 {
4645 (void) outnvl;
4646 nvlist_t *redactnvl = NULL;
4647 const char *redactbook = NULL;
4648
4649 if (nvlist_lookup_nvlist(innvl, "snapnv", &redactnvl) != 0)
4650 return (SET_ERROR(EINVAL));
4651 if (fnvlist_num_pairs(redactnvl) == 0)
4652 return (SET_ERROR(ENXIO));
4653 if (nvlist_lookup_string(innvl, "bookname", &redactbook) != 0)
4654 return (SET_ERROR(EINVAL));
4655
4656 return (dmu_redact_snap(snapname, redactnvl, redactbook));
4657 }
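/*
 * Illustrative userland sketch (not part of this file): creating a
 * redaction bookmark for a snapshot with respect to two redaction
 * snapshots. Assumes libzfs_core's lzc_redact(snapname, bookname, snapnv);
 * the names are made up.
 *
 *     nvlist_t *snapnv = fnvlist_alloc();
 *     fnvlist_add_boolean(snapnv, "tank/fs@redact1");
 *     fnvlist_add_boolean(snapnv, "tank/fs@redact2");
 *     int err = lzc_redact("tank/fs@snap", "redact_book", snapnv);
 *     fnvlist_free(snapnv);
 */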
4658
4659 /*
4660 * inputs:
4661 * zc_name old name of dataset
4662 * zc_value new name of dataset
4663 * zc_cookie recursive flag (only valid for snapshots)
4664 *
4665 * outputs: none
4666 */
4667 static int
4668 zfs_ioc_rename(zfs_cmd_t *zc)
4669 {
4670 objset_t *os;
4671 dmu_objset_type_t ost;
4672 boolean_t recursive = zc->zc_cookie & 1;
4673 boolean_t nounmount = !!(zc->zc_cookie & 2);
4674 char *at;
4675 int err;
4676
4677 /* "zfs rename" from and to ...%recv datasets should both fail */
4678 zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
4679 zc->zc_value[sizeof (zc->zc_value) - 1] = '\0';
4680 if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
4681 dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
4682 strchr(zc->zc_name, '%') || strchr(zc->zc_value, '%'))
4683 return (SET_ERROR(EINVAL));
4684
4685 err = dmu_objset_hold(zc->zc_name, FTAG, &os);
4686 if (err != 0)
4687 return (err);
4688 ost = dmu_objset_type(os);
4689 dmu_objset_rele(os, FTAG);
4690
4691 at = strchr(zc->zc_name, '@');
4692 if (at != NULL) {
4693 /* snaps must be in same fs */
4694 int error;
4695
4696 if (strncmp(zc->zc_name, zc->zc_value, at - zc->zc_name + 1))
4697 return (SET_ERROR(EXDEV));
4698 *at = '\0';
4699 if (ost == DMU_OST_ZFS && !nounmount) {
4700 error = dmu_objset_find(zc->zc_name,
4701 recursive_unmount, at + 1,
4702 recursive ? DS_FIND_CHILDREN : 0);
4703 if (error != 0) {
4704 *at = '@';
4705 return (error);
4706 }
4707 }
4708 error = dsl_dataset_rename_snapshot(zc->zc_name,
4709 at + 1, strchr(zc->zc_value, '@') + 1, recursive);
4710 *at = '@';
4711
4712 return (error);
4713 } else {
4714 return (dsl_dir_rename(zc->zc_name, zc->zc_value));
4715 }
4716 }
4717
4718 static int
4719 zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
4720 {
4721 const char *propname = nvpair_name(pair);
4722 boolean_t issnap = (strchr(dsname, '@') != NULL);
4723 zfs_prop_t prop = zfs_name_to_prop(propname);
4724 uint64_t intval, compval;
4725 int err;
4726
4727 if (prop == ZPROP_USERPROP) {
4728 if (zfs_prop_user(propname)) {
4729 if ((err = zfs_secpolicy_write_perms(dsname,
4730 ZFS_DELEG_PERM_USERPROP, cr)))
4731 return (err);
4732 return (0);
4733 }
4734
4735 if (!issnap && zfs_prop_userquota(propname)) {
4736 const char *perm = NULL;
4737 const char *uq_prefix =
4738 zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA];
4739 const char *gq_prefix =
4740 zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA];
4741 const char *uiq_prefix =
4742 zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA];
4743 const char *giq_prefix =
4744 zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA];
4745 const char *pq_prefix =
4746 zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA];
4747 const char *piq_prefix = zfs_userquota_prop_prefixes[\
4748 ZFS_PROP_PROJECTOBJQUOTA];
4749
4750 if (strncmp(propname, uq_prefix,
4751 strlen(uq_prefix)) == 0) {
4752 perm = ZFS_DELEG_PERM_USERQUOTA;
4753 } else if (strncmp(propname, uiq_prefix,
4754 strlen(uiq_prefix)) == 0) {
4755 perm = ZFS_DELEG_PERM_USEROBJQUOTA;
4756 } else if (strncmp(propname, gq_prefix,
4757 strlen(gq_prefix)) == 0) {
4758 perm = ZFS_DELEG_PERM_GROUPQUOTA;
4759 } else if (strncmp(propname, giq_prefix,
4760 strlen(giq_prefix)) == 0) {
4761 perm = ZFS_DELEG_PERM_GROUPOBJQUOTA;
4762 } else if (strncmp(propname, pq_prefix,
4763 strlen(pq_prefix)) == 0) {
4764 perm = ZFS_DELEG_PERM_PROJECTQUOTA;
4765 } else if (strncmp(propname, piq_prefix,
4766 strlen(piq_prefix)) == 0) {
4767 perm = ZFS_DELEG_PERM_PROJECTOBJQUOTA;
4768 } else {
4769 /* {USER|GROUP|PROJECT}USED are read-only */
4770 return (SET_ERROR(EINVAL));
4771 }
4772
4773 if ((err = zfs_secpolicy_write_perms(dsname, perm, cr)))
4774 return (err);
4775 return (0);
4776 }
4777
4778 return (SET_ERROR(EINVAL));
4779 }
4780
4781 if (issnap)
4782 return (SET_ERROR(EINVAL));
4783
4784 if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
4785 /*
4786 * dsl_prop_get_all_impl() returns properties in this
4787 * format.
4788 */
4789 nvlist_t *attrs;
4790 VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
4791 VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
4792 &pair) == 0);
4793 }
4794
4795 /*
4796 * Check that this value is valid for this pool version
4797 */
4798 switch (prop) {
4799 case ZFS_PROP_COMPRESSION:
4800 /*
4801 * If the user specified gzip compression, make sure
4802 * the SPA supports it. We ignore any errors here since
4803 * we'll catch them later.
4804 */
4805 if (nvpair_value_uint64(pair, &intval) == 0) {
4806 compval = ZIO_COMPRESS_ALGO(intval);
4807 if (compval >= ZIO_COMPRESS_GZIP_1 &&
4808 compval <= ZIO_COMPRESS_GZIP_9 &&
4809 zfs_earlier_version(dsname,
4810 SPA_VERSION_GZIP_COMPRESSION)) {
4811 return (SET_ERROR(ENOTSUP));
4812 }
4813
4814 if (compval == ZIO_COMPRESS_ZLE &&
4815 zfs_earlier_version(dsname,
4816 SPA_VERSION_ZLE_COMPRESSION))
4817 return (SET_ERROR(ENOTSUP));
4818
4819 if (compval == ZIO_COMPRESS_LZ4) {
4820 spa_t *spa;
4821
4822 if ((err = spa_open(dsname, &spa, FTAG)) != 0)
4823 return (err);
4824
4825 if (!spa_feature_is_enabled(spa,
4826 SPA_FEATURE_LZ4_COMPRESS)) {
4827 spa_close(spa, FTAG);
4828 return (SET_ERROR(ENOTSUP));
4829 }
4830 spa_close(spa, FTAG);
4831 }
4832
4833 if (compval == ZIO_COMPRESS_ZSTD) {
4834 spa_t *spa;
4835
4836 if ((err = spa_open(dsname, &spa, FTAG)) != 0)
4837 return (err);
4838
4839 if (!spa_feature_is_enabled(spa,
4840 SPA_FEATURE_ZSTD_COMPRESS)) {
4841 spa_close(spa, FTAG);
4842 return (SET_ERROR(ENOTSUP));
4843 }
4844 spa_close(spa, FTAG);
4845 }
4846 }
4847 break;
4848
4849 case ZFS_PROP_COPIES:
4850 if (zfs_earlier_version(dsname, SPA_VERSION_DITTO_BLOCKS))
4851 return (SET_ERROR(ENOTSUP));
4852 break;
4853
4854 case ZFS_PROP_VOLBLOCKSIZE:
4855 case ZFS_PROP_RECORDSIZE:
4856 /* Record sizes above 128k need the feature to be enabled */
4857 if (nvpair_value_uint64(pair, &intval) == 0 &&
4858 intval > SPA_OLD_MAXBLOCKSIZE) {
4859 spa_t *spa;
4860
4861 /*
4862 * We don't allow setting the property above 1MB,
4863 * unless the tunable has been changed.
4864 */
4865 if (intval > zfs_max_recordsize ||
4866 intval > SPA_MAXBLOCKSIZE)
4867 return (SET_ERROR(ERANGE));
4868
4869 if ((err = spa_open(dsname, &spa, FTAG)) != 0)
4870 return (err);
4871
4872 if (!spa_feature_is_enabled(spa,
4873 SPA_FEATURE_LARGE_BLOCKS)) {
4874 spa_close(spa, FTAG);
4875 return (SET_ERROR(ENOTSUP));
4876 }
4877 spa_close(spa, FTAG);
4878 }
4879 break;
4880
4881 case ZFS_PROP_DNODESIZE:
4882 /* Dnode sizes above 512 need the feature to be enabled */
4883 if (nvpair_value_uint64(pair, &intval) == 0 &&
4884 intval != ZFS_DNSIZE_LEGACY) {
4885 spa_t *spa;
4886
4887 if ((err = spa_open(dsname, &spa, FTAG)) != 0)
4888 return (err);
4889
4890 if (!spa_feature_is_enabled(spa,
4891 SPA_FEATURE_LARGE_DNODE)) {
4892 spa_close(spa, FTAG);
4893 return (SET_ERROR(ENOTSUP));
4894 }
4895 spa_close(spa, FTAG);
4896 }
4897 break;
4898
4899 case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
4900 /*
4901 * This property could require the allocation classes
4902 * feature to be active for setting, however we allow
4903 * it so that tests of settable properties succeed.
4904 * The CLI will issue a warning in this case.
4905 */
4906 break;
4907
4908 case ZFS_PROP_SHARESMB:
4909 if (zpl_earlier_version(dsname, ZPL_VERSION_FUID))
4910 return (SET_ERROR(ENOTSUP));
4911 break;
4912
4913 case ZFS_PROP_ACLINHERIT:
4914 if (nvpair_type(pair) == DATA_TYPE_UINT64 &&
4915 nvpair_value_uint64(pair, &intval) == 0) {
4916 if (intval == ZFS_ACL_PASSTHROUGH_X &&
4917 zfs_earlier_version(dsname,
4918 SPA_VERSION_PASSTHROUGH_X))
4919 return (SET_ERROR(ENOTSUP));
4920 }
4921 break;
4922 case ZFS_PROP_CHECKSUM:
4923 case ZFS_PROP_DEDUP:
4924 {
4925 spa_feature_t feature;
4926 spa_t *spa;
4927 int err;
4928
4929 /* dedup feature version checks */
4930 if (prop == ZFS_PROP_DEDUP &&
4931 zfs_earlier_version(dsname, SPA_VERSION_DEDUP))
4932 return (SET_ERROR(ENOTSUP));
4933
4934 if (nvpair_type(pair) == DATA_TYPE_UINT64 &&
4935 nvpair_value_uint64(pair, &intval) == 0) {
4936 /* check prop value is enabled in features */
4937 feature = zio_checksum_to_feature(
4938 intval & ZIO_CHECKSUM_MASK);
4939 if (feature == SPA_FEATURE_NONE)
4940 break;
4941
4942 if ((err = spa_open(dsname, &spa, FTAG)) != 0)
4943 return (err);
4944
4945 if (!spa_feature_is_enabled(spa, feature)) {
4946 spa_close(spa, FTAG);
4947 return (SET_ERROR(ENOTSUP));
4948 }
4949 spa_close(spa, FTAG);
4950 }
4951 break;
4952 }
4953
4954 default:
4955 break;
4956 }
4957
4958 return (zfs_secpolicy_setprop(dsname, prop, pair, CRED()));
4959 }
4960
4961 /*
4962 * Removes properties from the given props list that fail permission checks
4963 * needed to clear them and to restore them in case of a receive error. For each
4964 * property, make sure we have both set and inherit permissions.
4965 *
4966 * Returns the first error encountered if any permission checks fail. If the
4967 * caller provides a non-NULL errlist, it also gives the complete list of names
4968 * of all the properties that failed a permission check along with the
4969 * corresponding error numbers. The caller is responsible for freeing the
4970 * returned errlist.
4971 *
4972 * If every property checks out successfully, zero is returned and the list
4973 * pointed at by errlist is NULL.
4974 */
4975 static int
4976 zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
4977 {
4978 zfs_cmd_t *zc;
4979 nvpair_t *pair, *next_pair;
4980 nvlist_t *errors;
4981 int err, rv = 0;
4982
4983 if (props == NULL)
4984 return (0);
4985
4986 VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0);
4987
4988 zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP);
4989 (void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name));
4990 pair = nvlist_next_nvpair(props, NULL);
4991 while (pair != NULL) {
4992 next_pair = nvlist_next_nvpair(props, pair);
4993
4994 (void) strlcpy(zc->zc_value, nvpair_name(pair),
4995 sizeof (zc->zc_value));
4996 if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 ||
4997 (err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) {
4998 VERIFY(nvlist_remove_nvpair(props, pair) == 0);
4999 VERIFY(nvlist_add_int32(errors,
5000 zc->zc_value, err) == 0);
5001 }
5002 pair = next_pair;
5003 }
5004 kmem_free(zc, sizeof (zfs_cmd_t));
5005
5006 if ((pair = nvlist_next_nvpair(errors, NULL)) == NULL) {
5007 nvlist_free(errors);
5008 errors = NULL;
5009 } else {
5010 VERIFY(nvpair_value_int32(pair, &rv) == 0);
5011 }
5012
5013 if (errlist == NULL)
5014 nvlist_free(errors);
5015 else
5016 *errlist = errors;
5017
5018 return (rv);
5019 }
5020
5021 static boolean_t
5022 propval_equals(nvpair_t *p1, nvpair_t *p2)
5023 {
5024 if (nvpair_type(p1) == DATA_TYPE_NVLIST) {
5025 /* dsl_prop_get_all_impl() format */
5026 nvlist_t *attrs;
5027 VERIFY(nvpair_value_nvlist(p1, &attrs) == 0);
5028 VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
5029 &p1) == 0);
5030 }
5031
5032 if (nvpair_type(p2) == DATA_TYPE_NVLIST) {
5033 nvlist_t *attrs;
5034 VERIFY(nvpair_value_nvlist(p2, &attrs) == 0);
5035 VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
5036 &p2) == 0);
5037 }
5038
5039 if (nvpair_type(p1) != nvpair_type(p2))
5040 return (B_FALSE);
5041
5042 if (nvpair_type(p1) == DATA_TYPE_STRING) {
5043 const char *valstr1, *valstr2;
5044
5045 VERIFY(nvpair_value_string(p1, &valstr1) == 0);
5046 VERIFY(nvpair_value_string(p2, &valstr2) == 0);
5047 return (strcmp(valstr1, valstr2) == 0);
5048 } else {
5049 uint64_t intval1, intval2;
5050
5051 VERIFY(nvpair_value_uint64(p1, &intval1) == 0);
5052 VERIFY(nvpair_value_uint64(p2, &intval2) == 0);
5053 return (intval1 == intval2);
5054 }
5055 }
5056
5057 /*
5058 * Remove properties from props if they are not going to change (as determined
5059 * by comparison with origprops). Remove them from origprops as well, since we
5060 * do not need to clear or restore properties that won't change.
5061 */
5062 static void
5063 props_reduce(nvlist_t *props, nvlist_t *origprops)
5064 {
5065 nvpair_t *pair, *next_pair;
5066
5067 if (origprops == NULL)
5068 return; /* all props need to be received */
5069
5070 pair = nvlist_next_nvpair(props, NULL);
5071 while (pair != NULL) {
5072 const char *propname = nvpair_name(pair);
5073 nvpair_t *match;
5074
5075 next_pair = nvlist_next_nvpair(props, pair);
5076
5077 if ((nvlist_lookup_nvpair(origprops, propname,
5078 &match) != 0) || !propval_equals(pair, match))
5079 goto next; /* need to set received value */
5080
5081 /* don't clear the existing received value */
5082 (void) nvlist_remove_nvpair(origprops, match);
5083 /* don't bother receiving the property */
5084 (void) nvlist_remove_nvpair(props, pair);
5085 next:
5086 pair = next_pair;
5087 }
5088 }
5089
5090 /*
5091 * Extract properties that cannot be set PRIOR to the receipt of a dataset.
5092 * For example, refquota cannot be set until after the receipt of a dataset,
5093 * because in replication streams, an older/earlier snapshot may exceed the
5094 * refquota. We want to receive the older/earlier snapshot, but setting
5095 * refquota pre-receipt will set the dsl's ACTUAL quota, which will prevent
5096 * the older/earlier snapshot from being received (with EDQUOT).
5097 *
5098 * The ZFS test "zfs_receive_011_pos" demonstrates such a scenario.
5099 *
5100 * libzfs will need to be judicious in handling errors encountered for
5101 * properties extracted by this function.
5102 */
5103 static nvlist_t *
5104 extract_delay_props(nvlist_t *props)
5105 {
5106 nvlist_t *delayprops;
5107 nvpair_t *nvp, *tmp;
5108 static const zfs_prop_t delayable[] = {
5109 ZFS_PROP_REFQUOTA,
5110 ZFS_PROP_KEYLOCATION,
5111 /*
5112 * Setting ZFS_PROP_SHARESMB requires the objset type to be
5113 * known, which is not possible prior to receipt of raw sends.
5114 */
5115 ZFS_PROP_SHARESMB,
5116 0
5117 };
5118 int i;
5119
5120 VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
5121
5122 for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL;
5123 nvp = nvlist_next_nvpair(props, nvp)) {
5124 /*
5125 * strcmp() is safe because zfs_prop_to_name() always returns
5126 * a bounded string.
5127 */
5128 for (i = 0; delayable[i] != 0; i++) {
5129 if (strcmp(zfs_prop_to_name(delayable[i]),
5130 nvpair_name(nvp)) == 0) {
5131 break;
5132 }
5133 }
5134 if (delayable[i] != 0) {
5135 tmp = nvlist_prev_nvpair(props, nvp);
5136 VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0);
5137 VERIFY(nvlist_remove_nvpair(props, nvp) == 0);
5138 nvp = tmp;
5139 }
5140 }
5141
5142 if (nvlist_empty(delayprops)) {
5143 nvlist_free(delayprops);
5144 delayprops = NULL;
5145 }
5146 return (delayprops);
5147 }
5148
5149 static void
5150 zfs_allow_log_destroy(void *arg)
5151 {
5152 char *poolname = arg;
5153
5154 if (poolname != NULL)
5155 kmem_strfree(poolname);
5156 }
5157
5158 #ifdef ZFS_DEBUG
5159 static boolean_t zfs_ioc_recv_inject_err;
5160 #endif
5161
5162 /*
5163 * nvlist 'errors' is always allocated. It will contain descriptions of any
5164 * errors encountered. It is the caller's responsibility to free it.
5165 */
5166 static int
5167 zfs_ioc_recv_impl(char *tofs, char *tosnap, const char *origin,
5168 nvlist_t *recvprops, nvlist_t *localprops, nvlist_t *hidden_args,
5169 boolean_t force, boolean_t heal, boolean_t resumable, int input_fd,
5170 dmu_replay_record_t *begin_record, uint64_t *read_bytes,
5171 uint64_t *errflags, nvlist_t **errors)
5172 {
5173 dmu_recv_cookie_t drc;
5174 int error = 0;
5175 int props_error = 0;
5176 offset_t off, noff;
5177 nvlist_t *local_delayprops = NULL;
5178 nvlist_t *recv_delayprops = NULL;
5179 nvlist_t *inherited_delayprops = NULL;
5180 nvlist_t *origprops = NULL; /* existing properties */
5181 nvlist_t *origrecvd = NULL; /* existing received properties */
5182 boolean_t first_recvd_props = B_FALSE;
5183 boolean_t tofs_was_redacted;
5184 zfs_file_t *input_fp;
5185
5186 *read_bytes = 0;
5187 *errflags = 0;
5188 *errors = fnvlist_alloc();
5189 off = 0;
5190
5191 if ((input_fp = zfs_file_get(input_fd)) == NULL)
5192 return (SET_ERROR(EBADF));
5193
5194 noff = off = zfs_file_off(input_fp);
5195 error = dmu_recv_begin(tofs, tosnap, begin_record, force, heal,
5196 resumable, localprops, hidden_args, origin, &drc, input_fp,
5197 &off);
5198 if (error != 0)
5199 goto out;
5200 tofs_was_redacted = dsl_get_redacted(drc.drc_ds);
5201
5202 /*
5203 * Set properties before we receive the stream so that they are applied
5204 * to the new data. Note that we must call dmu_recv_stream() if
5205 * dmu_recv_begin() succeeds.
5206 */
5207 if (recvprops != NULL && !drc.drc_newfs) {
5208 if (spa_version(dsl_dataset_get_spa(drc.drc_ds)) >=
5209 SPA_VERSION_RECVD_PROPS &&
5210 !dsl_prop_get_hasrecvd(tofs))
5211 first_recvd_props = B_TRUE;
5212
5213 /*
5214 * If new received properties are supplied, they are to
5215 * completely replace the existing received properties,
5216 * so stash away the existing ones.
5217 */
5218 if (dsl_prop_get_received(tofs, &origrecvd) == 0) {
5219 nvlist_t *errlist = NULL;
5220 /*
5221 * Don't bother writing a property if its value won't
5222 * change (and avoid the unnecessary security checks).
5223 *
5224 * The first receive after SPA_VERSION_RECVD_PROPS is a
5225 * special case where we blow away all local properties
5226 * regardless.
5227 */
5228 if (!first_recvd_props)
5229 props_reduce(recvprops, origrecvd);
5230 if (zfs_check_clearable(tofs, origrecvd, &errlist) != 0)
5231 (void) nvlist_merge(*errors, errlist, 0);
5232 nvlist_free(errlist);
5233
5234 if (clear_received_props(tofs, origrecvd,
5235 first_recvd_props ? NULL : recvprops) != 0)
5236 *errflags |= ZPROP_ERR_NOCLEAR;
5237 } else {
5238 *errflags |= ZPROP_ERR_NOCLEAR;
5239 }
5240 }
5241
5242 /*
5243 * Stash away existing properties so we can restore them on error unless
5244 * we're doing the first receive after SPA_VERSION_RECVD_PROPS, in which
5245 * case "origrecvd" will take care of that.
5246 */
5247 if (localprops != NULL && !drc.drc_newfs && !first_recvd_props) {
5248 objset_t *os;
5249 if (dmu_objset_hold(tofs, FTAG, &os) == 0) {
5250 if (dsl_prop_get_all(os, &origprops) != 0) {
5251 *errflags |= ZPROP_ERR_NOCLEAR;
5252 }
5253 dmu_objset_rele(os, FTAG);
5254 } else {
5255 *errflags |= ZPROP_ERR_NOCLEAR;
5256 }
5257 }
5258
5259 if (recvprops != NULL) {
5260 props_error = dsl_prop_set_hasrecvd(tofs);
5261
5262 if (props_error == 0) {
5263 recv_delayprops = extract_delay_props(recvprops);
5264 (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED,
5265 recvprops, *errors);
5266 }
5267 }
5268
5269 if (localprops != NULL) {
5270 nvlist_t *oprops = fnvlist_alloc();
5271 nvlist_t *xprops = fnvlist_alloc();
5272 nvpair_t *nvp = NULL;
5273
5274 while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) {
5275 if (nvpair_type(nvp) == DATA_TYPE_BOOLEAN) {
5276 /* -x property */
5277 const char *name = nvpair_name(nvp);
5278 zfs_prop_t prop = zfs_name_to_prop(name);
5279 if (prop != ZPROP_USERPROP) {
5280 if (!zfs_prop_inheritable(prop))
5281 continue;
5282 } else if (!zfs_prop_user(name))
5283 continue;
5284 fnvlist_add_boolean(xprops, name);
5285 } else {
5286 /* -o property=value */
5287 fnvlist_add_nvpair(oprops, nvp);
5288 }
5289 }
5290
5291 local_delayprops = extract_delay_props(oprops);
5292 (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL,
5293 oprops, *errors);
5294 inherited_delayprops = extract_delay_props(xprops);
5295 (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED,
5296 xprops, *errors);
5297
5298 nvlist_free(oprops);
5299 nvlist_free(xprops);
5300 }
5301
5302 error = dmu_recv_stream(&drc, &off);
5303
5304 if (error == 0) {
5305 zfsvfs_t *zfsvfs = NULL;
5306 zvol_state_handle_t *zv = NULL;
5307
5308 if (getzfsvfs(tofs, &zfsvfs) == 0) {
5309 /* online recv */
5310 dsl_dataset_t *ds;
5311 int end_err;
5312 boolean_t stream_is_redacted = DMU_GET_FEATUREFLAGS(
5313 begin_record->drr_u.drr_begin.
5314 drr_versioninfo) & DMU_BACKUP_FEATURE_REDACTED;
5315
5316 ds = dmu_objset_ds(zfsvfs->z_os);
5317 error = zfs_suspend_fs(zfsvfs);
5318 /*
5319 * If the suspend fails, then the recv_end will
5320 * likely also fail, and clean up after itself.
5321 */
5322 end_err = dmu_recv_end(&drc, zfsvfs);
5323 /*
5324 * If the dataset was not redacted, but we received a
5325 * redacted stream onto it, we need to unmount the
5326 * dataset. Otherwise, resume the filesystem.
5327 */
5328 if (error == 0 && !drc.drc_newfs &&
5329 stream_is_redacted && !tofs_was_redacted) {
5330 error = zfs_end_fs(zfsvfs, ds);
5331 } else if (error == 0) {
5332 error = zfs_resume_fs(zfsvfs, ds);
5333 }
5334 error = error ? error : end_err;
5335 zfs_vfs_rele(zfsvfs);
5336 } else if ((zv = zvol_suspend(tofs)) != NULL) {
5337 error = dmu_recv_end(&drc, zvol_tag(zv));
5338 zvol_resume(zv);
5339 } else {
5340 error = dmu_recv_end(&drc, NULL);
5341 }
5342
5343 /* Set delayed properties now, after we're done receiving. */
5344 if (recv_delayprops != NULL && error == 0) {
5345 (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED,
5346 recv_delayprops, *errors);
5347 }
5348 if (local_delayprops != NULL && error == 0) {
5349 (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL,
5350 local_delayprops, *errors);
5351 }
5352 if (inherited_delayprops != NULL && error == 0) {
5353 (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED,
5354 inherited_delayprops, *errors);
5355 }
5356 }
5357
5358 /*
5359 * Merge delayed props back in with initial props, in case
5360 * this is a DEBUG build and zfs_ioc_recv_inject_err is set (which means
5361 * we have to make sure clear_received_props() includes
5362 * the delayed properties).
5363 *
5364 * Since zfs_ioc_recv_inject_err is only in DEBUG kernels,
5365 * using ASSERT() will be just like a VERIFY.
5366 */
5367 if (recv_delayprops != NULL) {
5368 ASSERT(nvlist_merge(recvprops, recv_delayprops, 0) == 0);
5369 nvlist_free(recv_delayprops);
5370 }
5371 if (local_delayprops != NULL) {
5372 ASSERT(nvlist_merge(localprops, local_delayprops, 0) == 0);
5373 nvlist_free(local_delayprops);
5374 }
5375 if (inherited_delayprops != NULL) {
5376 ASSERT(nvlist_merge(localprops, inherited_delayprops, 0) == 0);
5377 nvlist_free(inherited_delayprops);
5378 }
5379 *read_bytes = off - noff;
5380
5381 #ifdef ZFS_DEBUG
5382 if (zfs_ioc_recv_inject_err) {
5383 zfs_ioc_recv_inject_err = B_FALSE;
5384 error = 1;
5385 }
5386 #endif
5387
5388 /*
5389 * On error, restore the original props.
5390 */
5391 if (error != 0 && recvprops != NULL && !drc.drc_newfs) {
5392 if (clear_received_props(tofs, recvprops, NULL) != 0) {
5393 /*
5394 * We failed to clear the received properties.
5395 * Since we may have left a $recvd value on the
5396 * system, we can't clear the $hasrecvd flag.
5397 */
5398 *errflags |= ZPROP_ERR_NORESTORE;
5399 } else if (first_recvd_props) {
5400 dsl_prop_unset_hasrecvd(tofs);
5401 }
5402
5403 if (origrecvd == NULL && !drc.drc_newfs) {
5404 /* We failed to stash the original properties. */
5405 *errflags |= ZPROP_ERR_NORESTORE;
5406 }
5407
5408 /*
5409 * dsl_props_set() will not convert RECEIVED to LOCAL on or
5410 * after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL
5411 * explicitly if we're restoring local properties cleared in the
5412 * first new-style receive.
5413 */
5414 if (origrecvd != NULL &&
5415 zfs_set_prop_nvlist(tofs, (first_recvd_props ?
5416 ZPROP_SRC_LOCAL : ZPROP_SRC_RECEIVED),
5417 origrecvd, NULL) != 0) {
5418 /*
5419 * We stashed the original properties but failed to
5420 * restore them.
5421 */
5422 *errflags |= ZPROP_ERR_NORESTORE;
5423 }
5424 }
5425 if (error != 0 && localprops != NULL && !drc.drc_newfs &&
5426 !first_recvd_props) {
5427 nvlist_t *setprops;
5428 nvlist_t *inheritprops;
5429 nvpair_t *nvp;
5430
5431 if (origprops == NULL) {
5432 /* We failed to stash the original properties. */
5433 *errflags |= ZPROP_ERR_NORESTORE;
5434 goto out;
5435 }
5436
5437 /* Restore original props */
5438 setprops = fnvlist_alloc();
5439 inheritprops = fnvlist_alloc();
5440 nvp = NULL;
5441 while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) {
5442 const char *name = nvpair_name(nvp);
5443 const char *source;
5444 nvlist_t *attrs;
5445
5446 if (!nvlist_exists(origprops, name)) {
5447 /*
5448 * Property was not present or was explicitly
5449 * inherited before the receive, restore this.
5450 */
5451 fnvlist_add_boolean(inheritprops, name);
5452 continue;
5453 }
5454 attrs = fnvlist_lookup_nvlist(origprops, name);
5455 source = fnvlist_lookup_string(attrs, ZPROP_SOURCE);
5456
5457 /* Skip received properties */
5458 if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0)
5459 continue;
5460
5461 if (strcmp(source, tofs) == 0) {
5462 /* Property was locally set */
5463 fnvlist_add_nvlist(setprops, name, attrs);
5464 } else {
5465 /* Property was implicitly inherited */
5466 fnvlist_add_boolean(inheritprops, name);
5467 }
5468 }
5469
5470 if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL, setprops,
5471 NULL) != 0)
5472 *errflags |= ZPROP_ERR_NORESTORE;
5473 if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED, inheritprops,
5474 NULL) != 0)
5475 *errflags |= ZPROP_ERR_NORESTORE;
5476
5477 nvlist_free(setprops);
5478 nvlist_free(inheritprops);
5479 }
5480 out:
5481 zfs_file_put(input_fp);
5482 nvlist_free(origrecvd);
5483 nvlist_free(origprops);
5484
5485 if (error == 0)
5486 error = props_error;
5487
5488 return (error);
5489 }
5490
5491 /*
5492 * inputs:
5493 * zc_name name of containing filesystem (unused)
5494 * zc_nvlist_src{_size} nvlist of properties to apply
5495 * zc_nvlist_conf{_size} nvlist of properties to exclude
5496 * (DATA_TYPE_BOOLEAN) and override (everything else)
5497 * zc_value name of snapshot to create
5498 * zc_string name of clone origin (if DRR_FLAG_CLONE)
5499 * zc_cookie file descriptor to recv from
5500 * zc_begin_record the BEGIN record of the stream (not byteswapped)
5501 * zc_guid force flag
5502 *
5503 * outputs:
5504 * zc_cookie number of bytes read
5505 * zc_obj zprop_errflags_t
5506 * zc_nvlist_dst{_size} error for each unapplied received property
5507 */
5508 static int
5509 zfs_ioc_recv(zfs_cmd_t *zc)
5510 {
5511 dmu_replay_record_t begin_record;
5512 nvlist_t *errors = NULL;
5513 nvlist_t *recvdprops = NULL;
5514 nvlist_t *localprops = NULL;
5515 const char *origin = NULL;
5516 char *tosnap;
5517 char tofs[ZFS_MAX_DATASET_NAME_LEN];
5518 int error = 0;
5519
5520 if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
5521 strchr(zc->zc_value, '@') == NULL ||
5522 strchr(zc->zc_value, '%') != NULL) {
5523 return (SET_ERROR(EINVAL));
5524 }
5525
5526 (void) strlcpy(tofs, zc->zc_value, sizeof (tofs));
5527 tosnap = strchr(tofs, '@');
5528 *tosnap++ = '\0';
5529
5530 if (zc->zc_nvlist_src != 0 &&
5531 (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
5532 zc->zc_iflags, &recvdprops)) != 0) {
5533 goto out;
5534 }
5535
5536 if (zc->zc_nvlist_conf != 0 &&
5537 (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
5538 zc->zc_iflags, &localprops)) != 0) {
5539 goto out;
5540 }
5541
5542 if (zc->zc_string[0])
5543 origin = zc->zc_string;
5544
5545 begin_record.drr_type = DRR_BEGIN;
5546 begin_record.drr_payloadlen = 0;
5547 begin_record.drr_u.drr_begin = zc->zc_begin_record;
5548
5549 error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvdprops, localprops,
5550 NULL, zc->zc_guid, B_FALSE, B_FALSE, zc->zc_cookie, &begin_record,
5551 &zc->zc_cookie, &zc->zc_obj, &errors);
5552
5553 /*
5554 * Now that all props, initial and delayed, are set, report the prop
5555 * errors to the caller.
5556 */
5557 if (zc->zc_nvlist_dst_size != 0 && errors != NULL &&
5558 (nvlist_smush(errors, zc->zc_nvlist_dst_size) != 0 ||
5559 put_nvlist(zc, errors) != 0)) {
5560 /*
5561 * Caller made zc->zc_nvlist_dst less than the minimum expected
5562 * size or supplied an invalid address.
5563 */
5564 error = SET_ERROR(EINVAL);
5565 }
5566
5567 out:
5568 nvlist_free(errors);
5569 nvlist_free(recvdprops);
5570 nvlist_free(localprops);
5571
5572 return (error);
5573 }
5574
5575 /*
5576 * innvl: {
5577 * "snapname" -> full name of the snapshot to create
5578 * (optional) "props" -> received properties to set (nvlist)
5579 * (optional) "localprops" -> override and exclude properties (nvlist)
5580 * (optional) "origin" -> name of clone origin (DRR_FLAG_CLONE)
5581 * "begin_record" -> non-byteswapped dmu_replay_record_t
5582 * "input_fd" -> file descriptor to read stream from (int32)
5583 * (optional) "force" -> force flag (value ignored)
5584 * (optional) "heal" -> use send stream to heal data corruption
5585 * (optional) "resumable" -> resumable flag (value ignored)
5586 * (optional) "cleanup_fd" -> unused
5587 * (optional) "action_handle" -> unused
5588 * (optional) "hidden_args" -> { "wkeydata" -> value }
5589 * }
5590 *
5591 * outnvl: {
5592 * "read_bytes" -> number of bytes read
5593 * "error_flags" -> zprop_errflags_t
5594 * "errors" -> error for each unapplied received property (nvlist)
5595 * }
5596 */
5597 static const zfs_ioc_key_t zfs_keys_recv_new[] = {
5598 {"snapname", DATA_TYPE_STRING, 0},
5599 {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
5600 {"localprops", DATA_TYPE_NVLIST, ZK_OPTIONAL},
5601 {"origin", DATA_TYPE_STRING, ZK_OPTIONAL},
5602 {"begin_record", DATA_TYPE_BYTE_ARRAY, 0},
5603 {"input_fd", DATA_TYPE_INT32, 0},
5604 {"force", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
5605 {"heal", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
5606 {"resumable", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
5607 {"cleanup_fd", DATA_TYPE_INT32, ZK_OPTIONAL},
5608 {"action_handle", DATA_TYPE_UINT64, ZK_OPTIONAL},
5609 {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
5610 };
5611
5612 static int
5613 zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
5614 {
5615 dmu_replay_record_t *begin_record;
5616 uint_t begin_record_size;
5617 nvlist_t *errors = NULL;
5618 nvlist_t *recvprops = NULL;
5619 nvlist_t *localprops = NULL;
5620 nvlist_t *hidden_args = NULL;
5621 const char *snapname;
5622 const char *origin = NULL;
5623 char *tosnap;
5624 char tofs[ZFS_MAX_DATASET_NAME_LEN];
5625 boolean_t force;
5626 boolean_t heal;
5627 boolean_t resumable;
5628 uint64_t read_bytes = 0;
5629 uint64_t errflags = 0;
5630 int input_fd = -1;
5631 int error;
5632
5633 snapname = fnvlist_lookup_string(innvl, "snapname");
5634
5635 if (dataset_namecheck(snapname, NULL, NULL) != 0 ||
5636 strchr(snapname, '@') == NULL ||
5637 strchr(snapname, '%') != NULL) {
5638 return (SET_ERROR(EINVAL));
5639 }
5640
5641 (void) strlcpy(tofs, snapname, sizeof (tofs));
5642 tosnap = strchr(tofs, '@');
5643 *tosnap++ = '\0';
5644
5645 error = nvlist_lookup_string(innvl, "origin", &origin);
5646 if (error && error != ENOENT)
5647 return (error);
5648
5649 error = nvlist_lookup_byte_array(innvl, "begin_record",
5650 (uchar_t **)&begin_record, &begin_record_size);
5651 if (error != 0 || begin_record_size != sizeof (*begin_record))
5652 return (SET_ERROR(EINVAL));
5653
5654 input_fd = fnvlist_lookup_int32(innvl, "input_fd");
5655
5656 force = nvlist_exists(innvl, "force");
5657 heal = nvlist_exists(innvl, "heal");
5658 resumable = nvlist_exists(innvl, "resumable");
5659
5660 /* we still use "props" here for backwards compatibility */
5661 error = nvlist_lookup_nvlist(innvl, "props", &recvprops);
5662 if (error && error != ENOENT)
5663 goto out;
5664
5665 error = nvlist_lookup_nvlist(innvl, "localprops", &localprops);
5666 if (error && error != ENOENT)
5667 goto out;
5668
5669 error = nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
5670 if (error && error != ENOENT)
5671 goto out;
5672
5673 error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvprops, localprops,
5674 hidden_args, force, heal, resumable, input_fd, begin_record,
5675 &read_bytes, &errflags, &errors);
5676
5677 fnvlist_add_uint64(outnvl, "read_bytes", read_bytes);
5678 fnvlist_add_uint64(outnvl, "error_flags", errflags);
5679 fnvlist_add_nvlist(outnvl, "errors", errors);
5680
5681 out:
5682 nvlist_free(errors);
5683 nvlist_free(recvprops);
5684 nvlist_free(localprops);
5685 nvlist_free(hidden_args);
5686
5687 return (error);
5688 }
5689
5690 /*
5691 * When stack space is limited, we write replication stream data to the target
5692 * on a separate taskq thread, to make sure there's enough stack space.
5693 */
5694 #ifndef HAVE_LARGE_STACKS
5695 #define USE_SEND_TASKQ 1
5696 #endif
5697
5698 typedef struct dump_bytes_io {
5699 zfs_file_t *dbi_fp;
5700 caddr_t dbi_buf;
5701 int dbi_len;
5702 int dbi_err;
5703 } dump_bytes_io_t;
5704
5705 static void
5706 dump_bytes_cb(void *arg)
5707 {
5708 dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
5709 zfs_file_t *fp;
5710 caddr_t buf;
5711
5712 fp = dbi->dbi_fp;
5713 buf = dbi->dbi_buf;
5714
5715 dbi->dbi_err = zfs_file_write(fp, buf, dbi->dbi_len, NULL);
5716 }
5717
5718 typedef struct dump_bytes_arg {
5719 zfs_file_t *dba_fp;
5720 #ifdef USE_SEND_TASKQ
5721 taskq_t *dba_tq;
5722 taskq_ent_t dba_tqent;
5723 #endif
5724 } dump_bytes_arg_t;
5725
5726 static int
5727 dump_bytes(objset_t *os, void *buf, int len, void *arg)
5728 {
5729 dump_bytes_arg_t *dba = (dump_bytes_arg_t *)arg;
5730 dump_bytes_io_t dbi;
5731
5732 dbi.dbi_fp = dba->dba_fp;
5733 dbi.dbi_buf = buf;
5734 dbi.dbi_len = len;
5735
5736 #ifdef USE_SEND_TASKQ
5737 taskq_dispatch_ent(dba->dba_tq, dump_bytes_cb, &dbi, TQ_SLEEP,
5738 &dba->dba_tqent);
5739 taskq_wait(dba->dba_tq);
5740 #else
5741 dump_bytes_cb(&dbi);
5742 #endif
5743
5744 return (dbi.dbi_err);
5745 }
5746
5747 static int
5748 dump_bytes_init(dump_bytes_arg_t *dba, int fd, dmu_send_outparams_t *out)
5749 {
5750 zfs_file_t *fp = zfs_file_get(fd);
5751 if (fp == NULL)
5752 return (SET_ERROR(EBADF));
5753
5754 dba->dba_fp = fp;
5755 #ifdef USE_SEND_TASKQ
5756 dba->dba_tq = taskq_create("z_send", 1, defclsyspri, 0, 0, 0);
5757 taskq_init_ent(&dba->dba_tqent);
5758 #endif
5759
5760 memset(out, 0, sizeof (dmu_send_outparams_t));
5761 out->dso_outfunc = dump_bytes;
5762 out->dso_arg = dba;
5763 out->dso_dryrun = B_FALSE;
5764
5765 return (0);
5766 }
5767
5768 static void
5769 dump_bytes_fini(dump_bytes_arg_t *dba)
5770 {
5771 zfs_file_put(dba->dba_fp);
5772 #ifdef USE_SEND_TASKQ
5773 taskq_destroy(dba->dba_tq);
5774 #endif
5775 }
5776
5777 /*
5778 * inputs:
5779 * zc_name name of snapshot to send
5780 * zc_cookie file descriptor to send stream to
5781 * zc_obj fromorigin flag (mutually exclusive with zc_fromobj)
5782 * zc_sendobj objsetid of snapshot to send
5783 * zc_fromobj objsetid of incremental fromsnap (may be zero)
5784 * zc_guid if set, estimate size of stream only. zc_cookie is ignored.
5785 * output size in zc_objset_type.
5786 * zc_flags lzc_send_flags
5787 *
5788 * outputs:
5789 * zc_objset_type estimated size, if zc_guid is set
5790 *
5791 * NOTE: This is no longer the preferred interface, any new functionality
5792 * should be added to zfs_ioc_send_new() instead.
5793 */
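
/*
 * Illustrative sketch (the names and object numbers are placeholders):
 * a legacy caller asking only for a size estimate would fill the
 * zfs_cmd_t roughly as follows before issuing ZFS_IOC_SEND, then read
 * the estimate back from zc_objset_type.
 *
 *	zfs_cmd_t zc = {"\0"};
 *	(void) strlcpy(zc.zc_name, "tank/fs@snap", sizeof (zc.zc_name));
 *	zc.zc_sendobj = tosnap_objsetid;	// objsetid of the snapshot
 *	zc.zc_fromobj = fromsnap_objsetid;	// 0 for a full send
 *	zc.zc_guid = 1;		// estimate only; zc_cookie is ignored
 *	zc.zc_flags = 0x4;	// compressok, per the flag bits below
 */
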
5794 static int
5795 zfs_ioc_send(zfs_cmd_t *zc)
5796 {
5797 int error;
5798 offset_t off;
5799 boolean_t estimate = (zc->zc_guid != 0);
5800 boolean_t embedok = (zc->zc_flags & 0x1);
5801 boolean_t large_block_ok = (zc->zc_flags & 0x2);
5802 boolean_t compressok = (zc->zc_flags & 0x4);
5803 boolean_t rawok = (zc->zc_flags & 0x8);
5804 boolean_t savedok = (zc->zc_flags & 0x10);
5805
5806 if (zc->zc_obj != 0) {
5807 dsl_pool_t *dp;
5808 dsl_dataset_t *tosnap;
5809
5810 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
5811 if (error != 0)
5812 return (error);
5813
5814 error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &tosnap);
5815 if (error != 0) {
5816 dsl_pool_rele(dp, FTAG);
5817 return (error);
5818 }
5819
5820 if (dsl_dir_is_clone(tosnap->ds_dir))
5821 zc->zc_fromobj =
5822 dsl_dir_phys(tosnap->ds_dir)->dd_origin_obj;
5823 dsl_dataset_rele(tosnap, FTAG);
5824 dsl_pool_rele(dp, FTAG);
5825 }
5826
5827 if (estimate) {
5828 dsl_pool_t *dp;
5829 dsl_dataset_t *tosnap;
5830 dsl_dataset_t *fromsnap = NULL;
5831
5832 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
5833 if (error != 0)
5834 return (error);
5835
5836 error = dsl_dataset_hold_obj(dp, zc->zc_sendobj,
5837 FTAG, &tosnap);
5838 if (error != 0) {
5839 dsl_pool_rele(dp, FTAG);
5840 return (error);
5841 }
5842
5843 if (zc->zc_fromobj != 0) {
5844 error = dsl_dataset_hold_obj(dp, zc->zc_fromobj,
5845 FTAG, &fromsnap);
5846 if (error != 0) {
5847 dsl_dataset_rele(tosnap, FTAG);
5848 dsl_pool_rele(dp, FTAG);
5849 return (error);
5850 }
5851 }
5852
5853 error = dmu_send_estimate_fast(tosnap, fromsnap, NULL,
5854 compressok || rawok, savedok, &zc->zc_objset_type);
5855
5856 if (fromsnap != NULL)
5857 dsl_dataset_rele(fromsnap, FTAG);
5858 dsl_dataset_rele(tosnap, FTAG);
5859 dsl_pool_rele(dp, FTAG);
5860 } else {
5861 dump_bytes_arg_t dba;
5862 dmu_send_outparams_t out;
5863 error = dump_bytes_init(&dba, zc->zc_cookie, &out);
5864 if (error)
5865 return (error);
5866
5867 off = zfs_file_off(dba.dba_fp);
5868 error = dmu_send_obj(zc->zc_name, zc->zc_sendobj,
5869 zc->zc_fromobj, embedok, large_block_ok, compressok,
5870 rawok, savedok, zc->zc_cookie, &off, &out);
5871
5872 dump_bytes_fini(&dba);
5873 }
5874 return (error);
5875 }
5876
5877 /*
5878 * inputs:
5879 * zc_name name of snapshot on which to report progress
5880 * zc_cookie file descriptor of send stream
5881 *
5882 * outputs:
5883 * zc_cookie number of bytes written in send stream thus far
5884 * zc_objset_type logical size of data traversed by send thus far
5885 */
5886 static int
5887 zfs_ioc_send_progress(zfs_cmd_t *zc)
5888 {
5889 dsl_pool_t *dp;
5890 dsl_dataset_t *ds;
5891 dmu_sendstatus_t *dsp = NULL;
5892 int error;
5893
5894 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
5895 if (error != 0)
5896 return (error);
5897
5898 error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds);
5899 if (error != 0) {
5900 dsl_pool_rele(dp, FTAG);
5901 return (error);
5902 }
5903
5904 mutex_enter(&ds->ds_sendstream_lock);
5905
5906 /*
5907 * Iterate over all the send streams currently active on this dataset.
5908 * If there's one which matches the specified file descriptor _and_ the
5909 * stream was started by the current process, return the progress of
5910 * that stream.
5911 */
5912
5913 for (dsp = list_head(&ds->ds_sendstreams); dsp != NULL;
5914 dsp = list_next(&ds->ds_sendstreams, dsp)) {
5915 if (dsp->dss_outfd == zc->zc_cookie &&
5916 zfs_proc_is_caller(dsp->dss_proc))
5917 break;
5918 }
5919
5920 if (dsp != NULL) {
5921 zc->zc_cookie = atomic_cas_64((volatile uint64_t *)dsp->dss_off,
5922 0, 0);
5923 /* This is the closest thing we have to atomic_read_64. */
5924 zc->zc_objset_type = atomic_cas_64(&dsp->dss_blocks, 0, 0);
5925 } else {
5926 error = SET_ERROR(ENOENT);
5927 }
5928
5929 mutex_exit(&ds->ds_sendstream_lock);
5930 dsl_dataset_rele(ds, FTAG);
5931 dsl_pool_rele(dp, FTAG);
5932 return (error);
5933 }
5934
5935 static int
5936 zfs_ioc_inject_fault(zfs_cmd_t *zc)
5937 {
5938 int id, error;
5939
5940 error = zio_inject_fault(zc->zc_name, (int)zc->zc_guid, &id,
5941 &zc->zc_inject_record);
5942
5943 if (error == 0)
5944 zc->zc_guid = (uint64_t)id;
5945
5946 return (error);
5947 }
5948
5949 static int
5950 zfs_ioc_clear_fault(zfs_cmd_t *zc)
5951 {
5952 return (zio_clear_fault((int)zc->zc_guid));
5953 }
5954
5955 static int
5956 zfs_ioc_inject_list_next(zfs_cmd_t *zc)
5957 {
5958 int id = (int)zc->zc_guid;
5959 int error;
5960
5961 error = zio_inject_list_next(&id, zc->zc_name, sizeof (zc->zc_name),
5962 &zc->zc_inject_record);
5963
5964 zc->zc_guid = id;
5965
5966 return (error);
5967 }
5968
5969 static int
5970 zfs_ioc_error_log(zfs_cmd_t *zc)
5971 {
5972 spa_t *spa;
5973 int error;
5974
5975 if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
5976 return (error);
5977
5978 error = spa_get_errlog(spa, (void *)(uintptr_t)zc->zc_nvlist_dst,
5979 &zc->zc_nvlist_dst_size);
5980
5981 spa_close(spa, FTAG);
5982
5983 return (error);
5984 }
5985
5986 static int
5987 zfs_ioc_clear(zfs_cmd_t *zc)
5988 {
5989 spa_t *spa;
5990 vdev_t *vd;
5991 int error;
5992
5993 /*
5994 * On zpool clear we also fix up missing slogs
5995 */
5996 mutex_enter(&spa_namespace_lock);
5997 spa = spa_lookup(zc->zc_name);
5998 if (spa == NULL) {
5999 mutex_exit(&spa_namespace_lock);
6000 return (SET_ERROR(EIO));
6001 }
6002 if (spa_get_log_state(spa) == SPA_LOG_MISSING) {
6003 /* we need to let spa_open/spa_load clear the chains */
6004 spa_set_log_state(spa, SPA_LOG_CLEAR);
6005 }
6006 spa->spa_last_open_failed = 0;
6007 mutex_exit(&spa_namespace_lock);
6008
6009 if (zc->zc_cookie & ZPOOL_NO_REWIND) {
6010 error = spa_open(zc->zc_name, &spa, FTAG);
6011 } else {
6012 nvlist_t *policy;
6013 nvlist_t *config = NULL;
6014
6015 if (zc->zc_nvlist_src == 0)
6016 return (SET_ERROR(EINVAL));
6017
6018 if ((error = get_nvlist(zc->zc_nvlist_src,
6019 zc->zc_nvlist_src_size, zc->zc_iflags, &policy)) == 0) {
6020 error = spa_open_rewind(zc->zc_name, &spa, FTAG,
6021 policy, &config);
6022 if (config != NULL) {
6023 int err;
6024
6025 if ((err = put_nvlist(zc, config)) != 0)
6026 error = err;
6027 nvlist_free(config);
6028 }
6029 nvlist_free(policy);
6030 }
6031 }
6032
6033 if (error != 0)
6034 return (error);
6035
6036 /*
6037 * If multihost is enabled, resuming I/O is unsafe as another
6038 * host may have imported the pool. Check for remote activity.
6039 */
6040 if (spa_multihost(spa) && spa_suspended(spa) &&
6041 spa_mmp_remote_host_activity(spa)) {
6042 spa_close(spa, FTAG);
6043 return (SET_ERROR(EREMOTEIO));
6044 }
6045
6046 spa_vdev_state_enter(spa, SCL_NONE);
6047
6048 if (zc->zc_guid == 0) {
6049 vd = NULL;
6050 } else {
6051 vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE);
6052 if (vd == NULL) {
6053 error = SET_ERROR(ENODEV);
6054 (void) spa_vdev_state_exit(spa, NULL, error);
6055 spa_close(spa, FTAG);
6056 return (error);
6057 }
6058 }
6059
6060 vdev_clear(spa, vd);
6061
6062 (void) spa_vdev_state_exit(spa, spa_suspended(spa) ?
6063 NULL : spa->spa_root_vdev, 0);
6064
6065 /*
6066 * Resume any suspended I/Os.
6067 */
6068 if (zio_resume(spa) != 0)
6069 error = SET_ERROR(EIO);
6070
6071 spa_close(spa, FTAG);
6072
6073 return (error);
6074 }
6075
6076 /*
6077 * Reopen all the vdevs associated with the pool.
6078 *
6079 * innvl: {
6080  *     "scrub_restart" -> when true and a scrub is running, allow the scrub
6081  *         to restart as a side effect of the reopen (boolean).
6082 * }
6083 *
6084 * outnvl is unused
6085 */
6086 static const zfs_ioc_key_t zfs_keys_pool_reopen[] = {
6087 {"scrub_restart", DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
6088 };
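
/*
 * Illustrative sketch (the pool name is a placeholder): unlike the
 * presence-only DATA_TYPE_BOOLEAN flags used elsewhere in this file,
 * "scrub_restart" is a DATA_TYPE_BOOLEAN_VALUE, so a caller supplies an
 * explicit value:
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_boolean_value(innvl, "scrub_restart", B_FALSE);
 *	... issue ZFS_IOC_POOL_REOPEN against "tank" with this innvl ...
 *	fnvlist_free(innvl);
 */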
6089
6090 static int
6091 zfs_ioc_pool_reopen(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
6092 {
6093 (void) outnvl;
6094 spa_t *spa;
6095 int error;
6096 boolean_t rc, scrub_restart = B_TRUE;
6097
6098 if (innvl) {
6099 error = nvlist_lookup_boolean_value(innvl,
6100 "scrub_restart", &rc);
6101 if (error == 0)
6102 scrub_restart = rc;
6103 }
6104
6105 error = spa_open(pool, &spa, FTAG);
6106 if (error != 0)
6107 return (error);
6108
6109 spa_vdev_state_enter(spa, SCL_NONE);
6110
6111 /*
6112 * If the scrub_restart flag is B_FALSE and a scrub is already
6113 * in progress then set spa_scrub_reopen flag to B_TRUE so that
6114 * we don't restart the scrub as a side effect of the reopen.
6115  * Otherwise, let vdev_open() decide if a resilver is required.
6116 */
6117
6118 spa->spa_scrub_reopen = (!scrub_restart &&
6119 dsl_scan_scrubbing(spa->spa_dsl_pool));
6120 vdev_reopen(spa->spa_root_vdev);
6121 spa->spa_scrub_reopen = B_FALSE;
6122
6123 (void) spa_vdev_state_exit(spa, NULL, 0);
6124 spa_close(spa, FTAG);
6125 return (0);
6126 }
6127
6128 /*
6129 * inputs:
6130 * zc_name name of filesystem
6131 *
6132 * outputs:
6133 * zc_string name of conflicting snapshot, if there is one
6134 */
6135 static int
6136 zfs_ioc_promote(zfs_cmd_t *zc)
6137 {
6138 dsl_pool_t *dp;
6139 dsl_dataset_t *ds, *ods;
6140 char origin[ZFS_MAX_DATASET_NAME_LEN];
6141 char *cp;
6142 int error;
6143
6144 zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
6145 if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
6146 strchr(zc->zc_name, '%'))
6147 return (SET_ERROR(EINVAL));
6148
6149 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
6150 if (error != 0)
6151 return (error);
6152
6153 error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds);
6154 if (error != 0) {
6155 dsl_pool_rele(dp, FTAG);
6156 return (error);
6157 }
6158
6159 if (!dsl_dir_is_clone(ds->ds_dir)) {
6160 dsl_dataset_rele(ds, FTAG);
6161 dsl_pool_rele(dp, FTAG);
6162 return (SET_ERROR(EINVAL));
6163 }
6164
6165 error = dsl_dataset_hold_obj(dp,
6166 dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &ods);
6167 if (error != 0) {
6168 dsl_dataset_rele(ds, FTAG);
6169 dsl_pool_rele(dp, FTAG);
6170 return (error);
6171 }
6172
6173 dsl_dataset_name(ods, origin);
6174 dsl_dataset_rele(ods, FTAG);
6175 dsl_dataset_rele(ds, FTAG);
6176 dsl_pool_rele(dp, FTAG);
6177
6178 /*
6179 * We don't need to unmount *all* the origin fs's snapshots, but
6180 * it's easier.
6181 */
6182 cp = strchr(origin, '@');
6183 if (cp)
6184 *cp = '\0';
6185 (void) dmu_objset_find(origin,
6186 zfs_unmount_snap_cb, NULL, DS_FIND_SNAPSHOTS);
6187 return (dsl_dataset_promote(zc->zc_name, zc->zc_string));
6188 }
6189
6190 /*
6191 * Retrieve a single {user|group|project}{used|quota}@... property.
6192 *
6193 * inputs:
6194 * zc_name name of filesystem
6195 * zc_objset_type zfs_userquota_prop_t
6196  * zc_value		domain name (e.g. "S-1-234-567-89")
6197 * zc_guid RID/UID/GID
6198 *
6199 * outputs:
6200 * zc_cookie property value
6201 */
6202 static int
6203 zfs_ioc_userspace_one(zfs_cmd_t *zc)
6204 {
6205 zfsvfs_t *zfsvfs;
6206 int error;
6207
6208 if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
6209 return (SET_ERROR(EINVAL));
6210
6211 error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
6212 if (error != 0)
6213 return (error);
6214
6215 error = zfs_userspace_one(zfsvfs,
6216 zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie);
6217 zfsvfs_rele(zfsvfs, FTAG);
6218
6219 return (error);
6220 }
6221
6222 /*
6223 * inputs:
6224 * zc_name name of filesystem
6225 * zc_cookie zap cursor
6226 * zc_objset_type zfs_userquota_prop_t
6227 * zc_nvlist_dst[_size] buffer to fill (not really an nvlist)
6228 *
6229 * outputs:
6230 * zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t)
6231 * zc_cookie zap cursor
6232 */
6233 static int
6234 zfs_ioc_userspace_many(zfs_cmd_t *zc)
6235 {
6236 zfsvfs_t *zfsvfs;
6237 int bufsize = zc->zc_nvlist_dst_size;
6238
6239 if (bufsize <= 0)
6240 return (SET_ERROR(ENOMEM));
6241
6242 int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
6243 if (error != 0)
6244 return (error);
6245
6246 void *buf = vmem_alloc(bufsize, KM_SLEEP);
6247
6248 error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie,
6249 buf, &zc->zc_nvlist_dst_size);
6250
6251 if (error == 0) {
6252 error = xcopyout(buf,
6253 (void *)(uintptr_t)zc->zc_nvlist_dst,
6254 zc->zc_nvlist_dst_size);
6255 }
6256 vmem_free(buf, bufsize);
6257 zfsvfs_rele(zfsvfs, FTAG);
6258
6259 return (error);
6260 }
6261
6262 /*
6263 * inputs:
6264 * zc_name name of filesystem
6265 *
6266 * outputs:
6267 * none
6268 */
6269 static int
6270 zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
6271 {
6272 int error = 0;
6273 zfsvfs_t *zfsvfs;
6274
6275 if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) {
6276 if (!dmu_objset_userused_enabled(zfsvfs->z_os)) {
6277 /*
6278 * If userused is not enabled, it may be because the
6279 * objset needs to be closed & reopened (to grow the
6280  * objset_phys_t). Suspending and resuming the fs will do that.
6281 */
6282 dsl_dataset_t *ds, *newds;
6283
6284 ds = dmu_objset_ds(zfsvfs->z_os);
6285 error = zfs_suspend_fs(zfsvfs);
6286 if (error == 0) {
6287 dmu_objset_refresh_ownership(ds, &newds,
6288 B_TRUE, zfsvfs);
6289 error = zfs_resume_fs(zfsvfs, newds);
6290 }
6291 }
6292 if (error == 0) {
6293 mutex_enter(&zfsvfs->z_os->os_upgrade_lock);
6294 if (zfsvfs->z_os->os_upgrade_id == 0) {
6295 /* clear potential error code and retry */
6296 zfsvfs->z_os->os_upgrade_status = 0;
6297 mutex_exit(&zfsvfs->z_os->os_upgrade_lock);
6298
6299 dsl_pool_config_enter(
6300 dmu_objset_pool(zfsvfs->z_os), FTAG);
6301 dmu_objset_userspace_upgrade(zfsvfs->z_os);
6302 dsl_pool_config_exit(
6303 dmu_objset_pool(zfsvfs->z_os), FTAG);
6304 } else {
6305 mutex_exit(&zfsvfs->z_os->os_upgrade_lock);
6306 }
6307
6308 taskq_wait_id(zfsvfs->z_os->os_spa->spa_upgrade_taskq,
6309 zfsvfs->z_os->os_upgrade_id);
6310 error = zfsvfs->z_os->os_upgrade_status;
6311 }
6312 zfs_vfs_rele(zfsvfs);
6313 } else {
6314 objset_t *os;
6315
6316 /* XXX kind of reading contents without owning */
6317 error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os);
6318 if (error != 0)
6319 return (error);
6320
6321 mutex_enter(&os->os_upgrade_lock);
6322 if (os->os_upgrade_id == 0) {
6323 /* clear potential error code and retry */
6324 os->os_upgrade_status = 0;
6325 mutex_exit(&os->os_upgrade_lock);
6326
6327 dmu_objset_userspace_upgrade(os);
6328 } else {
6329 mutex_exit(&os->os_upgrade_lock);
6330 }
6331
6332 dsl_pool_rele(dmu_objset_pool(os), FTAG);
6333
6334 taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id);
6335 error = os->os_upgrade_status;
6336
6337 dsl_dataset_rele_flags(dmu_objset_ds(os), DS_HOLD_FLAG_DECRYPT,
6338 FTAG);
6339 }
6340 return (error);
6341 }
6342
6343 /*
6344 * inputs:
6345 * zc_name name of filesystem
6346 *
6347 * outputs:
6348 * none
6349 */
6350 static int
6351 zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc)
6352 {
6353 objset_t *os;
6354 int error;
6355
6356 error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os);
6357 if (error != 0)
6358 return (error);
6359
6360 if (dmu_objset_userobjspace_upgradable(os) ||
6361 dmu_objset_projectquota_upgradable(os)) {
6362 mutex_enter(&os->os_upgrade_lock);
6363 if (os->os_upgrade_id == 0) {
6364 /* clear potential error code and retry */
6365 os->os_upgrade_status = 0;
6366 mutex_exit(&os->os_upgrade_lock);
6367
6368 dmu_objset_id_quota_upgrade(os);
6369 } else {
6370 mutex_exit(&os->os_upgrade_lock);
6371 }
6372
6373 dsl_pool_rele(dmu_objset_pool(os), FTAG);
6374
6375 taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id);
6376 error = os->os_upgrade_status;
6377 } else {
6378 dsl_pool_rele(dmu_objset_pool(os), FTAG);
6379 }
6380
6381 dsl_dataset_rele_flags(dmu_objset_ds(os), DS_HOLD_FLAG_DECRYPT, FTAG);
6382
6383 return (error);
6384 }
6385
6386 static int
6387 zfs_ioc_share(zfs_cmd_t *zc)
6388 {
6389 return (SET_ERROR(ENOSYS));
6390 }
6391
6392 /*
6393 * inputs:
6394 * zc_name name of containing filesystem
6395 * zc_obj object # beyond which we want next in-use object #
6396 *
6397 * outputs:
6398 * zc_obj next in-use object #
6399 */
6400 static int
6401 zfs_ioc_next_obj(zfs_cmd_t *zc)
6402 {
6403 objset_t *os = NULL;
6404 int error;
6405
6406 error = dmu_objset_hold(zc->zc_name, FTAG, &os);
6407 if (error != 0)
6408 return (error);
6409
6410 error = dmu_object_next(os, &zc->zc_obj, B_FALSE, 0);
6411
6412 dmu_objset_rele(os, FTAG);
6413 return (error);
6414 }
6415
6416 /*
6417 * inputs:
6418 * zc_name name of filesystem
6419 * zc_value prefix name for snapshot
6420 * zc_cleanup_fd cleanup-on-exit file descriptor for calling process
6421 *
6422 * outputs:
6423 * zc_value short name of new snapshot
6424 */
6425 static int
6426 zfs_ioc_tmp_snapshot(zfs_cmd_t *zc)
6427 {
6428 char *snap_name;
6429 char *hold_name;
6430 minor_t minor;
6431
6432 zfs_file_t *fp = zfs_onexit_fd_hold(zc->zc_cleanup_fd, &minor);
6433 if (fp == NULL)
6434 return (SET_ERROR(EBADF));
6435
6436 snap_name = kmem_asprintf("%s-%016llx", zc->zc_value,
6437 (u_longlong_t)ddi_get_lbolt64());
6438 hold_name = kmem_asprintf("%%%s", zc->zc_value);
6439
6440 int error = dsl_dataset_snapshot_tmp(zc->zc_name, snap_name, minor,
6441 hold_name);
6442 if (error == 0)
6443 (void) strlcpy(zc->zc_value, snap_name,
6444 sizeof (zc->zc_value));
6445 kmem_strfree(snap_name);
6446 kmem_strfree(hold_name);
6447 zfs_onexit_fd_rele(fp);
6448 return (error);
6449 }
6450
6451 /*
6452 * inputs:
6453 * zc_name name of "to" snapshot
6454 * zc_value name of "from" snapshot
6455 * zc_cookie file descriptor to write diff data on
6456 *
6457 * outputs:
6458 * dmu_diff_record_t's to the file descriptor
6459 */
6460 static int
6461 zfs_ioc_diff(zfs_cmd_t *zc)
6462 {
6463 zfs_file_t *fp;
6464 offset_t off;
6465 int error;
6466
6467 if ((fp = zfs_file_get(zc->zc_cookie)) == NULL)
6468 return (SET_ERROR(EBADF));
6469
6470 off = zfs_file_off(fp);
6471 error = dmu_diff(zc->zc_name, zc->zc_value, fp, &off);
6472
6473 zfs_file_put(fp);
6474
6475 return (error);
6476 }
6477
6478 static int
6479 zfs_ioc_smb_acl(zfs_cmd_t *zc)
6480 {
6481 return (SET_ERROR(ENOTSUP));
6482 }
6483
6484 /*
6485 * innvl: {
6486 * "holds" -> { snapname -> holdname (string), ... }
6487 * (optional) "cleanup_fd" -> fd (int32)
6488 * }
6489 *
6490 * outnvl: {
6491 * snapname -> error value (int32)
6492 * ...
6493 * }
6494 */
6495 static const zfs_ioc_key_t zfs_keys_hold[] = {
6496 {"holds", DATA_TYPE_NVLIST, 0},
6497 {"cleanup_fd", DATA_TYPE_INT32, ZK_OPTIONAL},
6498 };
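
/*
 * Illustrative sketch (snapshot and tag names are placeholders): the
 * "holds" nvlist maps each snapshot to the tag to place on it.
 * Userland callers would normally go through lzc_hold() rather than
 * issuing the raw ioctl:
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	fnvlist_add_string(holds, "tank/fs@snap1", "backup-job");
 *	fnvlist_add_string(holds, "tank/fs@snap2", "backup-job");
 *	nvlist_t *errlist = NULL;
 *	int err = lzc_hold(holds, -1, &errlist);	// -1: no cleanup fd
 *	fnvlist_free(holds);	// also check err and free errlist if set
 */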
6499
6500 static int
6501 zfs_ioc_hold(const char *pool, nvlist_t *args, nvlist_t *errlist)
6502 {
6503 (void) pool;
6504 nvpair_t *pair;
6505 nvlist_t *holds;
6506 int cleanup_fd = -1;
6507 int error;
6508 minor_t minor = 0;
6509 zfs_file_t *fp = NULL;
6510
6511 holds = fnvlist_lookup_nvlist(args, "holds");
6512
6513 /* make sure the user didn't pass us any invalid (empty) tags */
6514 for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
6515 pair = nvlist_next_nvpair(holds, pair)) {
6516 const char *htag;
6517
6518 error = nvpair_value_string(pair, &htag);
6519 if (error != 0)
6520 return (SET_ERROR(error));
6521
6522 if (strlen(htag) == 0)
6523 return (SET_ERROR(EINVAL));
6524 }
6525
6526 if (nvlist_lookup_int32(args, "cleanup_fd", &cleanup_fd) == 0) {
6527 fp = zfs_onexit_fd_hold(cleanup_fd, &minor);
6528 if (fp == NULL)
6529 return (SET_ERROR(EBADF));
6530 }
6531
6532 error = dsl_dataset_user_hold(holds, minor, errlist);
6533 if (fp != NULL) {
6534 ASSERT3U(minor, !=, 0);
6535 zfs_onexit_fd_rele(fp);
6536 }
6537 return (SET_ERROR(error));
6538 }
6539
6540 /*
6541 * innvl is not used.
6542 *
6543 * outnvl: {
6544 * holdname -> time added (uint64 seconds since epoch)
6545 * ...
6546 * }
6547 */
6548 static const zfs_ioc_key_t zfs_keys_get_holds[] = {
6549 /* no nvl keys */
6550 };
6551
6552 static int
6553 zfs_ioc_get_holds(const char *snapname, nvlist_t *args, nvlist_t *outnvl)
6554 {
6555 (void) args;
6556 return (dsl_dataset_get_holds(snapname, outnvl));
6557 }
6558
6559 /*
6560 * innvl: {
6561 * snapname -> { holdname, ... }
6562 * ...
6563 * }
6564 *
6565 * outnvl: {
6566 * snapname -> error value (int32)
6567 * ...
6568 * }
6569 */
6570 static const zfs_ioc_key_t zfs_keys_release[] = {
6571 {"<snapname>...", DATA_TYPE_NVLIST, ZK_WILDCARDLIST},
6572 };
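
/*
 * Illustrative sketch (names are placeholders): for release, each
 * snapshot maps to a nested nvlist whose keys are the hold tags to
 * drop; lzc_release() in libzfs_core passes the same shape through.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *tags = fnvlist_alloc();
 *	fnvlist_add_boolean(tags, "backup-job");
 *	fnvlist_add_nvlist(holds, "tank/fs@snap1", tags);
 *	nvlist_t *errlist = NULL;
 *	int err = lzc_release(holds, &errlist);
 *	fnvlist_free(tags);
 *	fnvlist_free(holds);
 */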
6573
6574 static int
6575 zfs_ioc_release(const char *pool, nvlist_t *holds, nvlist_t *errlist)
6576 {
6577 (void) pool;
6578 return (dsl_dataset_user_release(holds, errlist));
6579 }
6580
6581 /*
6582 * inputs:
6583 * zc_guid flags (ZEVENT_NONBLOCK)
6584 * zc_cleanup_fd zevent file descriptor
6585 *
6586 * outputs:
6587 * zc_nvlist_dst next nvlist event
6588 * zc_cookie dropped events since last get
6589 */
6590 static int
6591 zfs_ioc_events_next(zfs_cmd_t *zc)
6592 {
6593 zfs_zevent_t *ze;
6594 nvlist_t *event = NULL;
6595 minor_t minor;
6596 uint64_t dropped = 0;
6597 int error;
6598
6599 zfs_file_t *fp = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
6600 if (fp == NULL)
6601 return (SET_ERROR(EBADF));
6602
6603 do {
6604 error = zfs_zevent_next(ze, &event,
6605 &zc->zc_nvlist_dst_size, &dropped);
6606 if (event != NULL) {
6607 zc->zc_cookie = dropped;
6608 error = put_nvlist(zc, event);
6609 nvlist_free(event);
6610 }
6611
6612 if (zc->zc_guid & ZEVENT_NONBLOCK)
6613 break;
6614
6615 if ((error == 0) || (error != ENOENT))
6616 break;
6617
6618 error = zfs_zevent_wait(ze);
6619 if (error != 0)
6620 break;
6621 } while (1);
6622
6623 zfs_zevent_fd_rele(fp);
6624
6625 return (error);
6626 }
6627
6628 /*
6629 * outputs:
6630 * zc_cookie cleared events count
6631 */
6632 static int
6633 zfs_ioc_events_clear(zfs_cmd_t *zc)
6634 {
6635 uint_t count;
6636
6637 zfs_zevent_drain_all(&count);
6638 zc->zc_cookie = count;
6639
6640 return (0);
6641 }
6642
6643 /*
6644 * inputs:
6645 * zc_guid eid | ZEVENT_SEEK_START | ZEVENT_SEEK_END
6646  * zc_cleanup_fd	zevent file descriptor
6647 */
6648 static int
6649 zfs_ioc_events_seek(zfs_cmd_t *zc)
6650 {
6651 zfs_zevent_t *ze;
6652 minor_t minor;
6653 int error;
6654
6655 zfs_file_t *fp = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
6656 if (fp == NULL)
6657 return (SET_ERROR(EBADF));
6658
6659 error = zfs_zevent_seek(ze, zc->zc_guid);
6660 zfs_zevent_fd_rele(fp);
6661
6662 return (error);
6663 }
6664
6665 /*
6666 * inputs:
6667 * zc_name name of later filesystem or snapshot
6668 * zc_value full name of old snapshot or bookmark
6669 *
6670 * outputs:
6671 * zc_cookie space in bytes
6672 * zc_objset_type compressed space in bytes
6673 * zc_perm_action uncompressed space in bytes
6674 */
6675 static int
6676 zfs_ioc_space_written(zfs_cmd_t *zc)
6677 {
6678 int error;
6679 dsl_pool_t *dp;
6680 dsl_dataset_t *new;
6681
6682 error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
6683 if (error != 0)
6684 return (error);
6685 error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &new);
6686 if (error != 0) {
6687 dsl_pool_rele(dp, FTAG);
6688 return (error);
6689 }
6690 if (strchr(zc->zc_value, '#') != NULL) {
6691 zfs_bookmark_phys_t bmp;
6692 error = dsl_bookmark_lookup(dp, zc->zc_value,
6693 new, &bmp);
6694 if (error == 0) {
6695 error = dsl_dataset_space_written_bookmark(&bmp, new,
6696 &zc->zc_cookie,
6697 &zc->zc_objset_type, &zc->zc_perm_action);
6698 }
6699 } else {
6700 dsl_dataset_t *old;
6701 error = dsl_dataset_hold(dp, zc->zc_value, FTAG, &old);
6702
6703 if (error == 0) {
6704 error = dsl_dataset_space_written(old, new,
6705 &zc->zc_cookie,
6706 &zc->zc_objset_type, &zc->zc_perm_action);
6707 dsl_dataset_rele(old, FTAG);
6708 }
6709 }
6710 dsl_dataset_rele(new, FTAG);
6711 dsl_pool_rele(dp, FTAG);
6712 return (error);
6713 }
6714
6715 /*
6716 * innvl: {
6717 * "firstsnap" -> snapshot name
6718 * }
6719 *
6720 * outnvl: {
6721 * "used" -> space in bytes
6722 * "compressed" -> compressed space in bytes
6723 * "uncompressed" -> uncompressed space in bytes
6724 * }
6725 */
6726 static const zfs_ioc_key_t zfs_keys_space_snaps[] = {
6727 {"firstsnap", DATA_TYPE_STRING, 0},
6728 };
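
/*
 * Illustrative sketch (snapshot names are placeholders): the caller
 * names the last snapshot of the range as the ioctl target and passes
 * the first one in innvl; the totals come back in outnvl as described
 * above.
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_string(innvl, "firstsnap", "tank/fs@monday");
 *	... issue ZFS_IOC_SPACE_SNAPS against "tank/fs@friday" ...
 *	uint64_t used = fnvlist_lookup_uint64(outnvl, "used");
 *	fnvlist_free(innvl);
 */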
6729
6730 static int
6731 zfs_ioc_space_snaps(const char *lastsnap, nvlist_t *innvl, nvlist_t *outnvl)
6732 {
6733 int error;
6734 dsl_pool_t *dp;
6735 dsl_dataset_t *new, *old;
6736 const char *firstsnap;
6737 uint64_t used, comp, uncomp;
6738
6739 firstsnap = fnvlist_lookup_string(innvl, "firstsnap");
6740
6741 error = dsl_pool_hold(lastsnap, FTAG, &dp);
6742 if (error != 0)
6743 return (error);
6744
6745 error = dsl_dataset_hold(dp, lastsnap, FTAG, &new);
6746 if (error == 0 && !new->ds_is_snapshot) {
6747 dsl_dataset_rele(new, FTAG);
6748 error = SET_ERROR(EINVAL);
6749 }
6750 if (error != 0) {
6751 dsl_pool_rele(dp, FTAG);
6752 return (error);
6753 }
6754 error = dsl_dataset_hold(dp, firstsnap, FTAG, &old);
6755 if (error == 0 && !old->ds_is_snapshot) {
6756 dsl_dataset_rele(old, FTAG);
6757 error = SET_ERROR(EINVAL);
6758 }
6759 if (error != 0) {
6760 dsl_dataset_rele(new, FTAG);
6761 dsl_pool_rele(dp, FTAG);
6762 return (error);
6763 }
6764
6765 error = dsl_dataset_space_wouldfree(old, new, &used, &comp, &uncomp);
6766 dsl_dataset_rele(old, FTAG);
6767 dsl_dataset_rele(new, FTAG);
6768 dsl_pool_rele(dp, FTAG);
6769 fnvlist_add_uint64(outnvl, "used", used);
6770 fnvlist_add_uint64(outnvl, "compressed", comp);
6771 fnvlist_add_uint64(outnvl, "uncompressed", uncomp);
6772 return (error);
6773 }
6774
6775 /*
6776 * innvl: {
6777 * "fd" -> file descriptor to write stream to (int32)
6778 * (optional) "fromsnap" -> full snap name to send an incremental from
6779 * (optional) "largeblockok" -> (value ignored)
6780 * indicates that blocks > 128KB are permitted
6781 * (optional) "embedok" -> (value ignored)
6782 * presence indicates DRR_WRITE_EMBEDDED records are permitted
6783 * (optional) "compressok" -> (value ignored)
6784 * presence indicates compressed DRR_WRITE records are permitted
6785 * (optional) "rawok" -> (value ignored)
6786 * presence indicates raw encrypted records should be used.
6787 * (optional) "savedok" -> (value ignored)
6788 * presence indicates we should send a partially received snapshot
6789 * (optional) "resume_object" and "resume_offset" -> (uint64)
6790 * if present, resume send stream from specified object and offset.
6791 * (optional) "redactbook" -> (string)
6792 * if present, use this bookmark's redaction list to generate a redacted
6793 * send stream
6794 * }
6795 *
6796 * outnvl is unused
6797 */
6798 static const zfs_ioc_key_t zfs_keys_send_new[] = {
6799 {"fd", DATA_TYPE_INT32, 0},
6800 {"fromsnap", DATA_TYPE_STRING, ZK_OPTIONAL},
6801 {"largeblockok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6802 {"embedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6803 {"compressok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6804 {"rawok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6805 {"savedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6806 {"resume_object", DATA_TYPE_UINT64, ZK_OPTIONAL},
6807 {"resume_offset", DATA_TYPE_UINT64, ZK_OPTIONAL},
6808 {"redactbook", DATA_TYPE_STRING, ZK_OPTIONAL},
6809 };
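
/*
 * Illustrative sketch (dataset names and the descriptor are
 * placeholders): a minimal compressed incremental send request would
 * look roughly like this; libzfs_core's lzc_send() builds an
 * equivalent innvl from its flags argument.
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_int32(innvl, "fd", out_fd);
 *	fnvlist_add_string(innvl, "fromsnap", "tank/fs@monday");
 *	fnvlist_add_boolean(innvl, "compressok");	// presence-only flag
 *	... issue ZFS_IOC_SEND_NEW against "tank/fs@friday" ...
 *	fnvlist_free(innvl);
 */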
6810
6811 static int
6812 zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
6813 {
6814 (void) outnvl;
6815 int error;
6816 offset_t off;
6817 const char *fromname = NULL;
6818 int fd;
6819 boolean_t largeblockok;
6820 boolean_t embedok;
6821 boolean_t compressok;
6822 boolean_t rawok;
6823 boolean_t savedok;
6824 uint64_t resumeobj = 0;
6825 uint64_t resumeoff = 0;
6826 const char *redactbook = NULL;
6827
6828 fd = fnvlist_lookup_int32(innvl, "fd");
6829
6830 (void) nvlist_lookup_string(innvl, "fromsnap", &fromname);
6831
6832 largeblockok = nvlist_exists(innvl, "largeblockok");
6833 embedok = nvlist_exists(innvl, "embedok");
6834 compressok = nvlist_exists(innvl, "compressok");
6835 rawok = nvlist_exists(innvl, "rawok");
6836 savedok = nvlist_exists(innvl, "savedok");
6837
6838 (void) nvlist_lookup_uint64(innvl, "resume_object", &resumeobj);
6839 (void) nvlist_lookup_uint64(innvl, "resume_offset", &resumeoff);
6840
6841 (void) nvlist_lookup_string(innvl, "redactbook", &redactbook);
6842
6843 dump_bytes_arg_t dba;
6844 dmu_send_outparams_t out;
6845 error = dump_bytes_init(&dba, fd, &out);
6846 if (error)
6847 return (error);
6848
6849 off = zfs_file_off(dba.dba_fp);
6850 error = dmu_send(snapname, fromname, embedok, largeblockok,
6851 compressok, rawok, savedok, resumeobj, resumeoff,
6852 redactbook, fd, &off, &out);
6853
6854 dump_bytes_fini(&dba);
6855
6856 return (error);
6857 }
6858
6859 static int
6860 send_space_sum(objset_t *os, void *buf, int len, void *arg)
6861 {
6862 (void) os, (void) buf;
6863 uint64_t *size = arg;
6864
6865 *size += len;
6866 return (0);
6867 }
6868
6869 /*
6870 * Determine approximately how large a zfs send stream will be -- the number
6871 * of bytes that will be written to the fd supplied to zfs_ioc_send_new().
6872 *
6873 * innvl: {
6874 * (optional) "from" -> full snap or bookmark name to send an incremental
6875 * from
6876 * (optional) "largeblockok" -> (value ignored)
6877 * indicates that blocks > 128KB are permitted
6878 * (optional) "embedok" -> (value ignored)
6879 * presence indicates DRR_WRITE_EMBEDDED records are permitted
6880 * (optional) "compressok" -> (value ignored)
6881 * presence indicates compressed DRR_WRITE records are permitted
6882 * (optional) "rawok" -> (value ignored)
6883 * presence indicates raw encrypted records should be used.
6884 * (optional) "resume_object" and "resume_offset" -> (uint64)
6885 * if present, resume send stream from specified object and offset.
6886 * (optional) "fd" -> file descriptor to use as a cookie for progress
6887 * tracking (int32)
6888 * }
6889 *
6890 * outnvl: {
6891 * "space" -> bytes of space (uint64)
6892 * }
6893 */
6894 static const zfs_ioc_key_t zfs_keys_send_space[] = {
6895 {"from", DATA_TYPE_STRING, ZK_OPTIONAL},
6896 {"fromsnap", DATA_TYPE_STRING, ZK_OPTIONAL},
6897 {"largeblockok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6898 {"embedok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6899 {"compressok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6900 {"rawok", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
6901 {"fd", DATA_TYPE_INT32, ZK_OPTIONAL},
6902 {"redactbook", DATA_TYPE_STRING, ZK_OPTIONAL},
6903 {"resume_object", DATA_TYPE_UINT64, ZK_OPTIONAL},
6904 {"resume_offset", DATA_TYPE_UINT64, ZK_OPTIONAL},
6905 {"bytes", DATA_TYPE_UINT64, ZK_OPTIONAL},
6906 };
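
/*
 * Illustrative sketch (names are placeholders): an estimate for an
 * incremental from a bookmark only needs "from" plus the flags that
 * will be used for the real send:
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_string(innvl, "from", "tank/fs#monday-mark");
 *	fnvlist_add_boolean(innvl, "compressok");
 *	... issue ZFS_IOC_SEND_SPACE against "tank/fs@friday" ...
 *	uint64_t space = fnvlist_lookup_uint64(outnvl, "space");
 *	fnvlist_free(innvl);
 */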
6907
6908 static int
6909 zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
6910 {
6911 dsl_pool_t *dp;
6912 dsl_dataset_t *tosnap;
6913 dsl_dataset_t *fromsnap = NULL;
6914 int error;
6915 const char *fromname = NULL;
6916 const char *redactlist_book = NULL;
6917 boolean_t largeblockok;
6918 boolean_t embedok;
6919 boolean_t compressok;
6920 boolean_t rawok;
6921 boolean_t savedok;
6922 uint64_t space = 0;
6923 boolean_t full_estimate = B_FALSE;
6924 uint64_t resumeobj = 0;
6925 uint64_t resumeoff = 0;
6926 uint64_t resume_bytes = 0;
6927 int32_t fd = -1;
6928 zfs_bookmark_phys_t zbm = {0};
6929
6930 error = dsl_pool_hold(snapname, FTAG, &dp);
6931 if (error != 0)
6932 return (error);
6933
6934 error = dsl_dataset_hold(dp, snapname, FTAG, &tosnap);
6935 if (error != 0) {
6936 dsl_pool_rele(dp, FTAG);
6937 return (error);
6938 }
6939 (void) nvlist_lookup_int32(innvl, "fd", &fd);
6940
6941 largeblockok = nvlist_exists(innvl, "largeblockok");
6942 embedok = nvlist_exists(innvl, "embedok");
6943 compressok = nvlist_exists(innvl, "compressok");
6944 rawok = nvlist_exists(innvl, "rawok");
6945 savedok = nvlist_exists(innvl, "savedok");
6946 boolean_t from = (nvlist_lookup_string(innvl, "from", &fromname) == 0);
6947 boolean_t altbook = (nvlist_lookup_string(innvl, "redactbook",
6948 &redactlist_book) == 0);
6949
6950 (void) nvlist_lookup_uint64(innvl, "resume_object", &resumeobj);
6951 (void) nvlist_lookup_uint64(innvl, "resume_offset", &resumeoff);
6952 (void) nvlist_lookup_uint64(innvl, "bytes", &resume_bytes);
6953
6954 if (altbook) {
6955 full_estimate = B_TRUE;
6956 } else if (from) {
6957 if (strchr(fromname, '#')) {
6958 error = dsl_bookmark_lookup(dp, fromname, tosnap, &zbm);
6959
6960 /*
6961 * dsl_bookmark_lookup() will fail with EXDEV if
6962 * the from-bookmark and tosnap are at the same txg.
6963 * However, it's valid to do a send (and therefore,
6964 * a send estimate) from and to the same time point,
6965 * if the bookmark is redacted (the incremental send
6966 * can change what's redacted on the target). In
6967 * this case, dsl_bookmark_lookup() fills in zbm
6968 * but returns EXDEV. Ignore this error.
6969 */
6970 if (error == EXDEV && zbm.zbm_redaction_obj != 0 &&
6971 zbm.zbm_guid ==
6972 dsl_dataset_phys(tosnap)->ds_guid)
6973 error = 0;
6974
6975 if (error != 0) {
6976 dsl_dataset_rele(tosnap, FTAG);
6977 dsl_pool_rele(dp, FTAG);
6978 return (error);
6979 }
6980 if (zbm.zbm_redaction_obj != 0 || !(zbm.zbm_flags &
6981 ZBM_FLAG_HAS_FBN)) {
6982 full_estimate = B_TRUE;
6983 }
6984 } else if (strchr(fromname, '@')) {
6985 error = dsl_dataset_hold(dp, fromname, FTAG, &fromsnap);
6986 if (error != 0) {
6987 dsl_dataset_rele(tosnap, FTAG);
6988 dsl_pool_rele(dp, FTAG);
6989 return (error);
6990 }
6991
6992 if (!dsl_dataset_is_before(tosnap, fromsnap, 0)) {
6993 full_estimate = B_TRUE;
6994 dsl_dataset_rele(fromsnap, FTAG);
6995 }
6996 } else {
6997 /*
6998 * from is not properly formatted as a snapshot or
6999 * bookmark
7000 */
7001 dsl_dataset_rele(tosnap, FTAG);
7002 dsl_pool_rele(dp, FTAG);
7003 return (SET_ERROR(EINVAL));
7004 }
7005 }
7006
7007 if (full_estimate) {
7008 dmu_send_outparams_t out = {0};
7009 offset_t off = 0;
7010 out.dso_outfunc = send_space_sum;
7011 out.dso_arg = &space;
7012 out.dso_dryrun = B_TRUE;
7013 /*
7014 * We have to release these holds so dmu_send can take them. It
7015 * will do all the error checking we need.
7016 */
7017 dsl_dataset_rele(tosnap, FTAG);
7018 dsl_pool_rele(dp, FTAG);
7019 error = dmu_send(snapname, fromname, embedok, largeblockok,
7020 compressok, rawok, savedok, resumeobj, resumeoff,
7021 redactlist_book, fd, &off, &out);
7022 } else {
7023 error = dmu_send_estimate_fast(tosnap, fromsnap,
7024 (from && strchr(fromname, '#') != NULL ? &zbm : NULL),
7025 compressok || rawok, savedok, &space);
7026 space -= resume_bytes;
7027 if (fromsnap != NULL)
7028 dsl_dataset_rele(fromsnap, FTAG);
7029 dsl_dataset_rele(tosnap, FTAG);
7030 dsl_pool_rele(dp, FTAG);
7031 }
7032
7033 fnvlist_add_uint64(outnvl, "space", space);
7034
7035 return (error);
7036 }
7037
7038 /*
7039 * Sync the currently open TXG to disk for the specified pool.
7040 * This is somewhat similar to 'zfs_sync()'.
7041 * For cases that do not result in error this ioctl will wait for
7042  * the currently open TXG to commit before returning to the caller.
7043 *
7044 * innvl: {
7045 * "force" -> when true, force uberblock update even if there is no dirty data.
7046  *     In addition, this will cause the vdev configuration to be written
7047  *     out, including updating the zpool cache file. (boolean_t)
7048 * }
7049 *
7050 * onvl is unused
7051 */
7052 static const zfs_ioc_key_t zfs_keys_pool_sync[] = {
7053 {"force", DATA_TYPE_BOOLEAN_VALUE, 0},
7054 };
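
/*
 * Illustrative sketch (the pool name is a placeholder): "force" is
 * declared above as a required DATA_TYPE_BOOLEAN_VALUE, so a forced
 * sync of "tank" would be requested with:
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_boolean_value(innvl, "force", B_TRUE);
 *	... issue ZFS_IOC_POOL_SYNC against "tank" ...
 *	fnvlist_free(innvl);
 */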
7055
7056 static int
7057 zfs_ioc_pool_sync(const char *pool, nvlist_t *innvl, nvlist_t *onvl)
7058 {
7059 (void) onvl;
7060 int err;
7061 boolean_t rc, force = B_FALSE;
7062 spa_t *spa;
7063
7064 if ((err = spa_open(pool, &spa, FTAG)) != 0)
7065 return (err);
7066
7067 if (innvl) {
7068 err = nvlist_lookup_boolean_value(innvl, "force", &rc);
7069 if (err == 0)
7070 force = rc;
7071 }
7072
7073 if (force) {
7074 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
7075 vdev_config_dirty(spa->spa_root_vdev);
7076 spa_config_exit(spa, SCL_CONFIG, FTAG);
7077 }
7078 txg_wait_synced(spa_get_dsl(spa), 0);
7079
7080 spa_close(spa, FTAG);
7081
7082 return (0);
7083 }
7084
7085 /*
7086 * Load a user's wrapping key into the kernel.
7087 * innvl: {
7088 * "hidden_args" -> { "wkeydata" -> value }
7089 * raw uint8_t array of encryption wrapping key data (32 bytes)
7090 * (optional) "noop" -> (value ignored)
7091  *     presence indicates the key should only be verified, not loaded
7092 * }
7093 */
7094 static const zfs_ioc_key_t zfs_keys_load_key[] = {
7095 {"hidden_args", DATA_TYPE_NVLIST, 0},
7096 {"noop", DATA_TYPE_BOOLEAN, ZK_OPTIONAL},
7097 };
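
/*
 * Illustrative sketch (the key material is obviously a placeholder):
 * the raw 32-byte wrapping key travels inside the nested "hidden_args"
 * nvlist so it stays out of the history log; userland callers would
 * normally use the libzfs_core wrapper rather than the raw ioctl.
 *
 *	uint8_t wkeydata[32];	// filled in by the caller
 *	nvlist_t *hidden_args = fnvlist_alloc();
 *	fnvlist_add_uint8_array(hidden_args, "wkeydata", wkeydata,
 *	    sizeof (wkeydata));
 *	nvlist_t *innvl = fnvlist_alloc();
 *	fnvlist_add_nvlist(innvl, "hidden_args", hidden_args);
 *	fnvlist_add_boolean(innvl, "noop");	// optional: verify only
 *	... issue ZFS_IOC_LOAD_KEY against the encrypted dataset ...
 *	fnvlist_free(hidden_args);
 *	fnvlist_free(innvl);
 */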
7098
7099 static int
7100 zfs_ioc_load_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
7101 {
7102 (void) outnvl;
7103 int ret;
7104 dsl_crypto_params_t *dcp = NULL;
7105 nvlist_t *hidden_args;
7106 boolean_t noop = nvlist_exists(innvl, "noop");
7107
7108 if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
7109 ret = SET_ERROR(EINVAL);
7110 goto error;
7111 }
7112
7113 hidden_args = fnvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS);
7114
7115 ret = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
7116 hidden_args, &dcp);
7117 if (ret != 0)
7118 goto error;
7119
7120 ret = spa_keystore_load_wkey(dsname, dcp, noop);
7121 if (ret != 0)
7122 goto error;
7123
7124 dsl_crypto_params_free(dcp, noop);
7125
7126 return (0);
7127
7128 error:
7129 dsl_crypto_params_free(dcp, B_TRUE);
7130 return (ret);
7131 }
7132
7133 /*
7134 * Unload a user's wrapping key from the kernel.
7135 * Both innvl and outnvl are unused.
7136 */
7137 static const zfs_ioc_key_t zfs_keys_unload_key[] = {
7138 /* no nvl keys */
7139 };
7140
7141 static int
7142 zfs_ioc_unload_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
7143 {
7144 (void) innvl, (void) outnvl;
7145 int ret = 0;
7146
7147 if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
7148 ret = (SET_ERROR(EINVAL));
7149 goto out;
7150 }
7151
7152 ret = spa_keystore_unload_wkey(dsname);
7153 if (ret != 0)
7154 goto out;
7155
7156 out:
7157 return (ret);
7158 }
7159
7160 /*
7161 * Changes a user's wrapping key used to decrypt a dataset. The keyformat,
7162 * keylocation, pbkdf2salt, and pbkdf2iters properties can also be specified
7163 * here to change how the key is derived in userspace.
7164 *
7165 * innvl: {
7166 * "hidden_args" (optional) -> { "wkeydata" -> value }
7167 * raw uint8_t array of new encryption wrapping key data (32 bytes)
7168 * "props" (optional) -> { prop -> value }
7169 * }
7170 *
7171 * outnvl is unused
7172 */
7173 static const zfs_ioc_key_t zfs_keys_change_key[] = {
7174 {"crypt_cmd", DATA_TYPE_UINT64, ZK_OPTIONAL},
7175 {"hidden_args", DATA_TYPE_NVLIST, ZK_OPTIONAL},
7176 {"props", DATA_TYPE_NVLIST, ZK_OPTIONAL},
7177 };
7178
7179 static int
7180 zfs_ioc_change_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
7181 {
7182 (void) outnvl;
7183 int ret;
7184 uint64_t cmd = DCP_CMD_NONE;
7185 dsl_crypto_params_t *dcp = NULL;
7186 nvlist_t *args = NULL, *hidden_args = NULL;
7187
7188 if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
7189 ret = (SET_ERROR(EINVAL));
7190 goto error;
7191 }
7192
7193 (void) nvlist_lookup_uint64(innvl, "crypt_cmd", &cmd);
7194 (void) nvlist_lookup_nvlist(innvl, "props", &args);
7195 (void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
7196
7197 ret = dsl_crypto_params_create_nvlist(cmd, args, hidden_args, &dcp);
7198 if (ret != 0)
7199 goto error;
7200
7201 ret = spa_keystore_change_key(dsname, dcp);
7202 if (ret != 0)
7203 goto error;
7204
7205 dsl_crypto_params_free(dcp, B_FALSE);
7206
7207 return (0);
7208
7209 error:
7210 dsl_crypto_params_free(dcp, B_TRUE);
7211 return (ret);
7212 }
7213
7214 static zfs_ioc_vec_t zfs_ioc_vec[ZFS_IOC_LAST - ZFS_IOC_FIRST];
7215
7216 static void
7217 zfs_ioctl_register_legacy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
7218 zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck,
7219 boolean_t log_history, zfs_ioc_poolcheck_t pool_check)
7220 {
7221 zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST];
7222
7223 ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
7224 ASSERT3U(ioc, <, ZFS_IOC_LAST);
7225 ASSERT3P(vec->zvec_legacy_func, ==, NULL);
7226 ASSERT3P(vec->zvec_func, ==, NULL);
7227
7228 vec->zvec_legacy_func = func;
7229 vec->zvec_secpolicy = secpolicy;
7230 vec->zvec_namecheck = namecheck;
7231 vec->zvec_allow_log = log_history;
7232 vec->zvec_pool_check = pool_check;
7233 }
7234
7235 /*
7236 * See the block comment at the beginning of this file for details on
7237 * each argument to this function.
7238 */
7239 void
7240 zfs_ioctl_register(const char *name, zfs_ioc_t ioc, zfs_ioc_func_t *func,
7241 zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck,
7242 zfs_ioc_poolcheck_t pool_check, boolean_t smush_outnvlist,
7243 boolean_t allow_log, const zfs_ioc_key_t *nvl_keys, size_t num_keys)
7244 {
7245 zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST];
7246
7247 ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
7248 ASSERT3U(ioc, <, ZFS_IOC_LAST);
7249 ASSERT3P(vec->zvec_legacy_func, ==, NULL);
7250 ASSERT3P(vec->zvec_func, ==, NULL);
7251
7252 /* if we are logging, the name must be valid */
7253 ASSERT(!allow_log || namecheck != NO_NAME);
7254
7255 vec->zvec_name = name;
7256 vec->zvec_func = func;
7257 vec->zvec_secpolicy = secpolicy;
7258 vec->zvec_namecheck = namecheck;
7259 vec->zvec_pool_check = pool_check;
7260 vec->zvec_smush_outnvlist = smush_outnvlist;
7261 vec->zvec_allow_log = allow_log;
7262 vec->zvec_nvl_keys = nvl_keys;
7263 vec->zvec_nvl_key_count = num_keys;
7264 }
7265
7266 static void
7267 zfs_ioctl_register_pool(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
7268 zfs_secpolicy_func_t *secpolicy, boolean_t log_history,
7269 zfs_ioc_poolcheck_t pool_check)
7270 {
7271 zfs_ioctl_register_legacy(ioc, func, secpolicy,
7272 POOL_NAME, log_history, pool_check);
7273 }
7274
7275 void
7276 zfs_ioctl_register_dataset_nolog(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
7277 zfs_secpolicy_func_t *secpolicy, zfs_ioc_poolcheck_t pool_check)
7278 {
7279 zfs_ioctl_register_legacy(ioc, func, secpolicy,
7280 DATASET_NAME, B_FALSE, pool_check);
7281 }
7282
7283 static void
7284 zfs_ioctl_register_pool_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func)
7285 {
7286 zfs_ioctl_register_legacy(ioc, func, zfs_secpolicy_config,
7287 POOL_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
7288 }
7289
7290 static void
7291 zfs_ioctl_register_pool_meta(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
7292 zfs_secpolicy_func_t *secpolicy)
7293 {
7294 zfs_ioctl_register_legacy(ioc, func, secpolicy,
7295 NO_NAME, B_FALSE, POOL_CHECK_NONE);
7296 }
7297
7298 static void
7299 zfs_ioctl_register_dataset_read_secpolicy(zfs_ioc_t ioc,
7300 zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy)
7301 {
7302 zfs_ioctl_register_legacy(ioc, func, secpolicy,
7303 DATASET_NAME, B_FALSE, POOL_CHECK_SUSPENDED);
7304 }
7305
7306 static void
7307 zfs_ioctl_register_dataset_read(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func)
7308 {
7309 zfs_ioctl_register_dataset_read_secpolicy(ioc, func,
7310 zfs_secpolicy_read);
7311 }
7312
7313 static void
7314 zfs_ioctl_register_dataset_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
7315 zfs_secpolicy_func_t *secpolicy)
7316 {
7317 zfs_ioctl_register_legacy(ioc, func, secpolicy,
7318 DATASET_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
7319 }
7320
7321 static void
7322 zfs_ioctl_init(void)
7323 {
7324 zfs_ioctl_register("snapshot", ZFS_IOC_SNAPSHOT,
7325 zfs_ioc_snapshot, zfs_secpolicy_snapshot, POOL_NAME,
7326 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7327 zfs_keys_snapshot, ARRAY_SIZE(zfs_keys_snapshot));
7328
7329 zfs_ioctl_register("log_history", ZFS_IOC_LOG_HISTORY,
7330 zfs_ioc_log_history, zfs_secpolicy_log_history, NO_NAME,
7331 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
7332 zfs_keys_log_history, ARRAY_SIZE(zfs_keys_log_history));
7333
7334 zfs_ioctl_register("space_snaps", ZFS_IOC_SPACE_SNAPS,
7335 zfs_ioc_space_snaps, zfs_secpolicy_read, DATASET_NAME,
7336 POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
7337 zfs_keys_space_snaps, ARRAY_SIZE(zfs_keys_space_snaps));
7338
7339 zfs_ioctl_register("send", ZFS_IOC_SEND_NEW,
7340 zfs_ioc_send_new, zfs_secpolicy_send_new, DATASET_NAME,
7341 POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
7342 zfs_keys_send_new, ARRAY_SIZE(zfs_keys_send_new));
7343
7344 zfs_ioctl_register("send_space", ZFS_IOC_SEND_SPACE,
7345 zfs_ioc_send_space, zfs_secpolicy_read, DATASET_NAME,
7346 POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
7347 zfs_keys_send_space, ARRAY_SIZE(zfs_keys_send_space));
7348
7349 zfs_ioctl_register("create", ZFS_IOC_CREATE,
7350 zfs_ioc_create, zfs_secpolicy_create_clone, DATASET_NAME,
7351 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7352 zfs_keys_create, ARRAY_SIZE(zfs_keys_create));
7353
7354 zfs_ioctl_register("clone", ZFS_IOC_CLONE,
7355 zfs_ioc_clone, zfs_secpolicy_create_clone, DATASET_NAME,
7356 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7357 zfs_keys_clone, ARRAY_SIZE(zfs_keys_clone));
7358
7359 zfs_ioctl_register("remap", ZFS_IOC_REMAP,
7360 zfs_ioc_remap, zfs_secpolicy_none, DATASET_NAME,
7361 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
7362 zfs_keys_remap, ARRAY_SIZE(zfs_keys_remap));
7363
7364 zfs_ioctl_register("destroy_snaps", ZFS_IOC_DESTROY_SNAPS,
7365 zfs_ioc_destroy_snaps, zfs_secpolicy_destroy_snaps, POOL_NAME,
7366 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7367 zfs_keys_destroy_snaps, ARRAY_SIZE(zfs_keys_destroy_snaps));
7368
7369 zfs_ioctl_register("hold", ZFS_IOC_HOLD,
7370 zfs_ioc_hold, zfs_secpolicy_hold, POOL_NAME,
7371 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7372 zfs_keys_hold, ARRAY_SIZE(zfs_keys_hold));
7373 zfs_ioctl_register("release", ZFS_IOC_RELEASE,
7374 zfs_ioc_release, zfs_secpolicy_release, POOL_NAME,
7375 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7376 zfs_keys_release, ARRAY_SIZE(zfs_keys_release));
7377
7378 zfs_ioctl_register("get_holds", ZFS_IOC_GET_HOLDS,
7379 zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME,
7380 POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
7381 zfs_keys_get_holds, ARRAY_SIZE(zfs_keys_get_holds));
7382
7383 zfs_ioctl_register("rollback", ZFS_IOC_ROLLBACK,
7384 zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME,
7385 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
7386 zfs_keys_rollback, ARRAY_SIZE(zfs_keys_rollback));
7387
7388 zfs_ioctl_register("bookmark", ZFS_IOC_BOOKMARK,
7389 zfs_ioc_bookmark, zfs_secpolicy_bookmark, POOL_NAME,
7390 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7391 zfs_keys_bookmark, ARRAY_SIZE(zfs_keys_bookmark));
7392
7393 zfs_ioctl_register("get_bookmarks", ZFS_IOC_GET_BOOKMARKS,
7394 zfs_ioc_get_bookmarks, zfs_secpolicy_read, DATASET_NAME,
7395 POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE,
7396 zfs_keys_get_bookmarks, ARRAY_SIZE(zfs_keys_get_bookmarks));
7397
7398 zfs_ioctl_register("get_bookmark_props", ZFS_IOC_GET_BOOKMARK_PROPS,
7399 zfs_ioc_get_bookmark_props, zfs_secpolicy_read, ENTITY_NAME,
7400 POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE, zfs_keys_get_bookmark_props,
7401 ARRAY_SIZE(zfs_keys_get_bookmark_props));
7402
7403 zfs_ioctl_register("destroy_bookmarks", ZFS_IOC_DESTROY_BOOKMARKS,
7404 zfs_ioc_destroy_bookmarks, zfs_secpolicy_destroy_bookmarks,
7405 POOL_NAME,
7406 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7407 zfs_keys_destroy_bookmarks,
7408 ARRAY_SIZE(zfs_keys_destroy_bookmarks));
7409
7410 zfs_ioctl_register("receive", ZFS_IOC_RECV_NEW,
7411 zfs_ioc_recv_new, zfs_secpolicy_recv, DATASET_NAME,
7412 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7413 zfs_keys_recv_new, ARRAY_SIZE(zfs_keys_recv_new));
7414 zfs_ioctl_register("load-key", ZFS_IOC_LOAD_KEY,
7415 zfs_ioc_load_key, zfs_secpolicy_load_key,
7416 DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE,
7417 zfs_keys_load_key, ARRAY_SIZE(zfs_keys_load_key));
7418 zfs_ioctl_register("unload-key", ZFS_IOC_UNLOAD_KEY,
7419 zfs_ioc_unload_key, zfs_secpolicy_load_key,
7420 DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE,
7421 zfs_keys_unload_key, ARRAY_SIZE(zfs_keys_unload_key));
7422 zfs_ioctl_register("change-key", ZFS_IOC_CHANGE_KEY,
7423 zfs_ioc_change_key, zfs_secpolicy_change_key,
7424 DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY,
7425 B_TRUE, B_TRUE, zfs_keys_change_key,
7426 ARRAY_SIZE(zfs_keys_change_key));
7427
7428 zfs_ioctl_register("sync", ZFS_IOC_POOL_SYNC,
7429 zfs_ioc_pool_sync, zfs_secpolicy_none, POOL_NAME,
7430 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
7431 zfs_keys_pool_sync, ARRAY_SIZE(zfs_keys_pool_sync));
7432 zfs_ioctl_register("reopen", ZFS_IOC_POOL_REOPEN, zfs_ioc_pool_reopen,
7433 zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED, B_TRUE,
7434 B_TRUE, zfs_keys_pool_reopen, ARRAY_SIZE(zfs_keys_pool_reopen));
7435
7436 zfs_ioctl_register("channel_program", ZFS_IOC_CHANNEL_PROGRAM,
7437 zfs_ioc_channel_program, zfs_secpolicy_config,
7438 POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE,
7439 B_TRUE, zfs_keys_channel_program,
7440 ARRAY_SIZE(zfs_keys_channel_program));
7441
7442 zfs_ioctl_register("redact", ZFS_IOC_REDACT,
7443 zfs_ioc_redact, zfs_secpolicy_config, DATASET_NAME,
7444 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7445 zfs_keys_redact, ARRAY_SIZE(zfs_keys_redact));
7446
7447 zfs_ioctl_register("zpool_checkpoint", ZFS_IOC_POOL_CHECKPOINT,
7448 zfs_ioc_pool_checkpoint, zfs_secpolicy_config, POOL_NAME,
7449 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7450 zfs_keys_pool_checkpoint, ARRAY_SIZE(zfs_keys_pool_checkpoint));
7451
7452 zfs_ioctl_register("zpool_discard_checkpoint",
7453 ZFS_IOC_POOL_DISCARD_CHECKPOINT, zfs_ioc_pool_discard_checkpoint,
7454 zfs_secpolicy_config, POOL_NAME,
7455 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7456 zfs_keys_pool_discard_checkpoint,
7457 ARRAY_SIZE(zfs_keys_pool_discard_checkpoint));
7458
7459 zfs_ioctl_register("zpool_prefetch",
7460 ZFS_IOC_POOL_PREFETCH, zfs_ioc_pool_prefetch,
7461 zfs_secpolicy_config, POOL_NAME,
7462 POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE,
7463 zfs_keys_pool_prefetch, ARRAY_SIZE(zfs_keys_pool_prefetch));
7464
7465 zfs_ioctl_register("initialize", ZFS_IOC_POOL_INITIALIZE,
7466 zfs_ioc_pool_initialize, zfs_secpolicy_config, POOL_NAME,
7467 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7468 zfs_keys_pool_initialize, ARRAY_SIZE(zfs_keys_pool_initialize));
7469
7470 zfs_ioctl_register("trim", ZFS_IOC_POOL_TRIM,
7471 zfs_ioc_pool_trim, zfs_secpolicy_config, POOL_NAME,
7472 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7473 zfs_keys_pool_trim, ARRAY_SIZE(zfs_keys_pool_trim));
7474
7475 zfs_ioctl_register("wait", ZFS_IOC_WAIT,
7476 zfs_ioc_wait, zfs_secpolicy_none, POOL_NAME,
7477 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
7478 zfs_keys_pool_wait, ARRAY_SIZE(zfs_keys_pool_wait));
7479
7480 zfs_ioctl_register("wait_fs", ZFS_IOC_WAIT_FS,
7481 zfs_ioc_wait_fs, zfs_secpolicy_none, DATASET_NAME,
7482 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
7483 zfs_keys_fs_wait, ARRAY_SIZE(zfs_keys_fs_wait));
7484
7485 zfs_ioctl_register("set_bootenv", ZFS_IOC_SET_BOOTENV,
7486 zfs_ioc_set_bootenv, zfs_secpolicy_config, POOL_NAME,
7487 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
7488 zfs_keys_set_bootenv, ARRAY_SIZE(zfs_keys_set_bootenv));
7489
7490 zfs_ioctl_register("get_bootenv", ZFS_IOC_GET_BOOTENV,
7491 zfs_ioc_get_bootenv, zfs_secpolicy_none, POOL_NAME,
7492 POOL_CHECK_SUSPENDED, B_FALSE, B_TRUE,
7493 zfs_keys_get_bootenv, ARRAY_SIZE(zfs_keys_get_bootenv));
7494
7495 zfs_ioctl_register("zpool_vdev_get_props", ZFS_IOC_VDEV_GET_PROPS,
7496 zfs_ioc_vdev_get_props, zfs_secpolicy_read, POOL_NAME,
7497 POOL_CHECK_NONE, B_FALSE, B_FALSE, zfs_keys_vdev_get_props,
7498 ARRAY_SIZE(zfs_keys_vdev_get_props));
7499
7500 zfs_ioctl_register("zpool_vdev_set_props", ZFS_IOC_VDEV_SET_PROPS,
7501 zfs_ioc_vdev_set_props, zfs_secpolicy_config, POOL_NAME,
7502 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
7503 zfs_keys_vdev_set_props, ARRAY_SIZE(zfs_keys_vdev_set_props));
7504
7505 zfs_ioctl_register("scrub", ZFS_IOC_POOL_SCRUB,
7506 zfs_ioc_pool_scrub, zfs_secpolicy_config, POOL_NAME,
7507 POOL_CHECK_NONE, B_TRUE, B_TRUE,
7508 zfs_keys_pool_scrub, ARRAY_SIZE(zfs_keys_pool_scrub));
7509
7510 zfs_ioctl_register("get_props", ZFS_IOC_POOL_GET_PROPS,
7511 zfs_ioc_pool_get_props, zfs_secpolicy_read, POOL_NAME,
7512 POOL_CHECK_NONE, B_FALSE, B_FALSE,
7513 zfs_keys_get_props, ARRAY_SIZE(zfs_keys_get_props));
7514
7515 zfs_ioctl_register("zpool_ddt_prune", ZFS_IOC_DDT_PRUNE,
7516 zfs_ioc_ddt_prune, zfs_secpolicy_config, POOL_NAME,
7517 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
7518 zfs_keys_ddt_prune, ARRAY_SIZE(zfs_keys_ddt_prune));
7519
7520 /* IOCTLS that use the legacy function signature */
7521
7522 zfs_ioctl_register_legacy(ZFS_IOC_POOL_FREEZE, zfs_ioc_pool_freeze,
7523 zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_READONLY);
7524
7525 zfs_ioctl_register_pool(ZFS_IOC_POOL_CREATE, zfs_ioc_pool_create,
7526 zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE);
7527 zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SCAN,
7528 zfs_ioc_pool_scan);
7529 zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_UPGRADE,
7530 zfs_ioc_pool_upgrade);
7531 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ADD,
7532 zfs_ioc_vdev_add);
7533 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_REMOVE,
7534 zfs_ioc_vdev_remove);
7535 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SET_STATE,
7536 zfs_ioc_vdev_set_state);
7537 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ATTACH,
7538 zfs_ioc_vdev_attach);
7539 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_DETACH,
7540 zfs_ioc_vdev_detach);
7541 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETPATH,
7542 zfs_ioc_vdev_setpath);
7543 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETFRU,
7544 zfs_ioc_vdev_setfru);
7545 zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SET_PROPS,
7546 zfs_ioc_pool_set_props);
7547 zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SPLIT,
7548 zfs_ioc_vdev_split);
7549 zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_REGUID,
7550 zfs_ioc_pool_reguid);
7551
7552 zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_CONFIGS,
7553 zfs_ioc_pool_configs, zfs_secpolicy_none);
7554 zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_TRYIMPORT,
7555 zfs_ioc_pool_tryimport, zfs_secpolicy_config);
7556 zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_FAULT,
7557 zfs_ioc_inject_fault, zfs_secpolicy_inject);
7558 zfs_ioctl_register_pool_meta(ZFS_IOC_CLEAR_FAULT,
7559 zfs_ioc_clear_fault, zfs_secpolicy_inject);
7560 zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_LIST_NEXT,
7561 zfs_ioc_inject_list_next, zfs_secpolicy_inject);
7562
7563 	/*
7564 	 * Pool destroy and export don't log history as part of
7565 	 * zfsdev_ioctl; instead, zfs_ioc_pool_export does the
7566 	 * logging of those commands.
7567 	 */
7568 zfs_ioctl_register_pool(ZFS_IOC_POOL_DESTROY, zfs_ioc_pool_destroy,
7569 zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
7570 zfs_ioctl_register_pool(ZFS_IOC_POOL_EXPORT, zfs_ioc_pool_export,
7571 zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
7572
7573 zfs_ioctl_register_pool(ZFS_IOC_POOL_STATS, zfs_ioc_pool_stats,
7574 zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE);
7575
7576 zfs_ioctl_register_pool(ZFS_IOC_ERROR_LOG, zfs_ioc_error_log,
7577 zfs_secpolicy_inject, B_FALSE, POOL_CHECK_SUSPENDED);
7578 zfs_ioctl_register_pool(ZFS_IOC_DSOBJ_TO_DSNAME,
7579 zfs_ioc_dsobj_to_dsname,
7580 zfs_secpolicy_diff, B_FALSE, POOL_CHECK_SUSPENDED);
7581 zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_HISTORY,
7582 zfs_ioc_pool_get_history,
7583 zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
7584
7585 zfs_ioctl_register_pool(ZFS_IOC_POOL_IMPORT, zfs_ioc_pool_import,
7586 zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE);
7587
7588 zfs_ioctl_register_pool(ZFS_IOC_CLEAR, zfs_ioc_clear,
7589 zfs_secpolicy_config, B_TRUE, POOL_CHECK_READONLY);
7590
7591 zfs_ioctl_register_dataset_read(ZFS_IOC_SPACE_WRITTEN,
7592 zfs_ioc_space_written);
7593 zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_RECVD_PROPS,
7594 zfs_ioc_objset_recvd_props);
7595 zfs_ioctl_register_dataset_read(ZFS_IOC_NEXT_OBJ,
7596 zfs_ioc_next_obj);
7597 zfs_ioctl_register_dataset_read(ZFS_IOC_GET_FSACL,
7598 zfs_ioc_get_fsacl);
7599 zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_STATS,
7600 zfs_ioc_objset_stats);
7601 zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_ZPLPROPS,
7602 zfs_ioc_objset_zplprops);
7603 zfs_ioctl_register_dataset_read(ZFS_IOC_DATASET_LIST_NEXT,
7604 zfs_ioc_dataset_list_next);
7605 zfs_ioctl_register_dataset_read(ZFS_IOC_SNAPSHOT_LIST_NEXT,
7606 zfs_ioc_snapshot_list_next);
7607 zfs_ioctl_register_dataset_read(ZFS_IOC_SEND_PROGRESS,
7608 zfs_ioc_send_progress);
7609
7610 zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_DIFF,
7611 zfs_ioc_diff, zfs_secpolicy_diff);
7612 zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_STATS,
7613 zfs_ioc_obj_to_stats, zfs_secpolicy_diff);
7614 zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_PATH,
7615 zfs_ioc_obj_to_path, zfs_secpolicy_diff);
7616 zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_ONE,
7617 zfs_ioc_userspace_one, zfs_secpolicy_userspace_one);
7618 zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_MANY,
7619 zfs_ioc_userspace_many, zfs_secpolicy_userspace_many);
7620 zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_SEND,
7621 zfs_ioc_send, zfs_secpolicy_send);
7622
7623 zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_PROP, zfs_ioc_set_prop,
7624 zfs_secpolicy_none);
7625 zfs_ioctl_register_dataset_modify(ZFS_IOC_DESTROY, zfs_ioc_destroy,
7626 zfs_secpolicy_destroy);
7627 zfs_ioctl_register_dataset_modify(ZFS_IOC_RENAME, zfs_ioc_rename,
7628 zfs_secpolicy_rename);
7629 zfs_ioctl_register_dataset_modify(ZFS_IOC_RECV, zfs_ioc_recv,
7630 zfs_secpolicy_recv);
7631 zfs_ioctl_register_dataset_modify(ZFS_IOC_PROMOTE, zfs_ioc_promote,
7632 zfs_secpolicy_promote);
7633 zfs_ioctl_register_dataset_modify(ZFS_IOC_INHERIT_PROP,
7634 zfs_ioc_inherit_prop, zfs_secpolicy_inherit_prop);
7635 zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_FSACL, zfs_ioc_set_fsacl,
7636 zfs_secpolicy_set_fsacl);
7637
7638 zfs_ioctl_register_dataset_nolog(ZFS_IOC_SHARE, zfs_ioc_share,
7639 zfs_secpolicy_share, POOL_CHECK_NONE);
7640 zfs_ioctl_register_dataset_nolog(ZFS_IOC_SMB_ACL, zfs_ioc_smb_acl,
7641 zfs_secpolicy_smb_acl, POOL_CHECK_NONE);
7642 zfs_ioctl_register_dataset_nolog(ZFS_IOC_USERSPACE_UPGRADE,
7643 zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade,
7644 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
7645 zfs_ioctl_register_dataset_nolog(ZFS_IOC_TMP_SNAPSHOT,
7646 zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot,
7647 POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
7648
7649 zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_NEXT, zfs_ioc_events_next,
7650 zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
7651 zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_CLEAR, zfs_ioc_events_clear,
7652 zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
7653 zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_SEEK, zfs_ioc_events_seek,
7654 zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
7655
7656 zfs_ioctl_init_os();
7657 }
7658
7659 /*
7660 * Verify that for non-legacy ioctls the input nvlist
7661 * pairs match against the expected input.
7662 *
7663 * Possible errors are:
7664 * ZFS_ERR_IOC_ARG_UNAVAIL An unrecognized nvpair was encountered
7665 * ZFS_ERR_IOC_ARG_REQUIRED A required nvpair is missing
7666 * ZFS_ERR_IOC_ARG_BADTYPE Invalid type for nvpair
7667 */
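/*
 * Illustrative sketch only; the key table below is hypothetical and not a
 * real registration:
 *
 *	static const zfs_ioc_key_t example_keys[] = {
 *		{"target",	DATA_TYPE_STRING,	0},
 *		{"force",	DATA_TYPE_BOOLEAN,	ZK_OPTIONAL},
 *	};
 *
 * Against this table, an input nvlist that omits "target" fails with
 * ZFS_ERR_IOC_ARG_REQUIRED, one that passes "target" as a uint64 fails
 * with ZFS_ERR_IOC_ARG_BADTYPE, and one that carries an unrecognized
 * pair (other than an "optional" nvlist) fails with
 * ZFS_ERR_IOC_ARG_UNAVAIL.
 */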
7668 static int
7669 zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec)
7670 {
7671 const zfs_ioc_key_t *nvl_keys = vec->zvec_nvl_keys;
7672 boolean_t required_keys_found = B_FALSE;
7673
7674 /*
7675 * examine each input pair
7676 */
7677 for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
7678 pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
7679 const char *name = nvpair_name(pair);
7680 data_type_t type = nvpair_type(pair);
7681 boolean_t identified = B_FALSE;
7682
7683 /*
7684 * check pair against the documented names and type
7685 */
7686 for (int k = 0; k < vec->zvec_nvl_key_count; k++) {
7687 /* if not a wild card name, check for an exact match */
7688 if ((nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) == 0 &&
7689 strcmp(nvl_keys[k].zkey_name, name) != 0)
7690 continue;
7691
7692 identified = B_TRUE;
7693
7694 if (nvl_keys[k].zkey_type != DATA_TYPE_ANY &&
7695 nvl_keys[k].zkey_type != type) {
7696 return (SET_ERROR(ZFS_ERR_IOC_ARG_BADTYPE));
7697 }
7698
7699 if (nvl_keys[k].zkey_flags & ZK_OPTIONAL)
7700 continue;
7701
7702 required_keys_found = B_TRUE;
7703 break;
7704 }
7705
7706 /* allow an 'optional' key, everything else is invalid */
7707 if (!identified &&
7708 (strcmp(name, "optional") != 0 ||
7709 type != DATA_TYPE_NVLIST)) {
7710 return (SET_ERROR(ZFS_ERR_IOC_ARG_UNAVAIL));
7711 }
7712 }
7713
7714 /* verify that all required keys were found */
7715 for (int k = 0; k < vec->zvec_nvl_key_count; k++) {
7716 if (nvl_keys[k].zkey_flags & ZK_OPTIONAL)
7717 continue;
7718
7719 if (nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) {
7720 /* at least one non-optional key is expected here */
7721 if (!required_keys_found)
7722 return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED));
7723 continue;
7724 }
7725
7726 if (!nvlist_exists(innvl, nvl_keys[k].zkey_name))
7727 return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED));
7728 }
7729
7730 return (0);
7731 }
7732
7733 static int
7734 pool_status_check(const char *name, zfs_ioc_namecheck_t type,
7735 zfs_ioc_poolcheck_t check)
7736 {
7737 spa_t *spa;
7738 int error;
7739
7740 ASSERT(type == POOL_NAME || type == DATASET_NAME ||
7741 type == ENTITY_NAME);
7742
7743 if (check & POOL_CHECK_NONE)
7744 return (0);
7745
7746 error = spa_open(name, &spa, FTAG);
7747 if (error == 0) {
7748 if ((check & POOL_CHECK_SUSPENDED) && spa_suspended(spa))
7749 error = SET_ERROR(EAGAIN);
7750 else if ((check & POOL_CHECK_READONLY) && !spa_writeable(spa))
7751 error = SET_ERROR(EROFS);
7752 spa_close(spa, FTAG);
7753 }
7754 return (error);
7755 }
7756
7757 int
7758 zfsdev_getminor(zfs_file_t *fp, minor_t *minorp)
7759 {
7760 zfsdev_state_t *zs, *fpd;
7761
7762 ASSERT(!MUTEX_HELD(&zfsdev_state_lock));
7763
7764 fpd = zfs_file_private(fp);
7765 if (fpd == NULL)
7766 return (SET_ERROR(EBADF));
7767
7768 mutex_enter(&zfsdev_state_lock);
7769
7770 for (zs = &zfsdev_state_listhead; zs != NULL; zs = zs->zs_next) {
7771
7772 if (zs->zs_minor == -1)
7773 continue;
7774
7775 if (fpd == zs) {
7776 *minorp = fpd->zs_minor;
7777 mutex_exit(&zfsdev_state_lock);
7778 return (0);
7779 }
7780 }
7781
7782 mutex_exit(&zfsdev_state_lock);
7783
7784 return (SET_ERROR(EBADF));
7785 }
7786
7787 void *
7788 zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
7789 {
7790 zfsdev_state_t *zs;
7791
7792 for (zs = &zfsdev_state_listhead; zs != NULL; zs = zs->zs_next) {
7793 if (zs->zs_minor == minor) {
7794 membar_consumer();
7795 switch (which) {
7796 case ZST_ONEXIT:
7797 return (zs->zs_onexit);
7798 case ZST_ZEVENT:
7799 return (zs->zs_zevent);
7800 case ZST_ALL:
7801 return (zs);
7802 }
7803 }
7804 }
7805
7806 return (NULL);
7807 }
7808
7809 /*
7810 * Find a free minor number. The zfsdev_state_list is expected to
7811 * be short since it is only a list of currently open file handles.
7812 */
7813 static minor_t
7814 zfsdev_minor_alloc(void)
7815 {
7816 static minor_t last_minor = 0;
7817 minor_t m;
7818
7819 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
7820
7821 for (m = last_minor + 1; m != last_minor; m++) {
7822 if (m > ZFSDEV_MAX_MINOR)
7823 m = 1;
7824 if (zfsdev_get_state(m, ZST_ALL) == NULL) {
7825 last_minor = m;
7826 return (m);
7827 }
7828 }
7829
7830 return (0);
7831 }
7832
7833 int
7834 zfsdev_state_init(void *priv)
7835 {
7836 zfsdev_state_t *zs, *zsprev = NULL;
7837 minor_t minor;
7838 boolean_t newzs = B_FALSE;
7839
7840 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
7841
7842 minor = zfsdev_minor_alloc();
7843 if (minor == 0)
7844 return (SET_ERROR(ENXIO));
7845
7846 for (zs = &zfsdev_state_listhead; zs != NULL; zs = zs->zs_next) {
7847 if (zs->zs_minor == -1)
7848 break;
7849 zsprev = zs;
7850 }
7851
7852 	if (zs == NULL) {
7853 zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
7854 newzs = B_TRUE;
7855 }
7856
7857 zfsdev_private_set_state(priv, zs);
7858
7859 zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
7860 zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
7861
7862 	/*
7863 	 * To allow lock-free concurrent reads of the minor list in
7864 	 * zfsdev_get_state(), a new entry must be fully initialized
7865 	 * before it is linked into the list; a recycled entry is
7866 	 * already linked, so its last store must be the update of
7867 	 * zs_minor (from -1 to the new value). The membar_producer()
7868 	 * below pairs with membar_consumer() in zfsdev_get_state().
7869 	 */
7870 if (newzs) {
7871 zs->zs_minor = minor;
7872 membar_producer();
7873 zsprev->zs_next = zs;
7874 } else {
7875 membar_producer();
7876 zs->zs_minor = minor;
7877 }
7878
7879 return (0);
7880 }
7881
7882 void
7883 zfsdev_state_destroy(void *priv)
7884 {
7885 zfsdev_state_t *zs = zfsdev_private_get_state(priv);
7886
7887 ASSERT(zs != NULL);
7888 ASSERT3S(zs->zs_minor, >, 0);
7889
7890 /*
7891 * The last reference to this zfsdev file descriptor is being dropped.
7892 * We don't have to worry about lookup grabbing this state object, and
7893 * zfsdev_state_init() will not try to reuse this object until it is
7894 * invalidated by setting zs_minor to -1. Invalidation must be done
7895 * last, with a memory barrier to ensure ordering. This lets us avoid
7896 * taking the global zfsdev state lock around destruction.
7897 */
7898 zfs_onexit_destroy(zs->zs_onexit);
7899 zfs_zevent_destroy(zs->zs_zevent);
7900 zs->zs_onexit = NULL;
7901 zs->zs_zevent = NULL;
7902 membar_producer();
7903 zs->zs_minor = -1;
7904 }
7905
7906 long
7907 zfsdev_ioctl_common(uint_t vecnum, zfs_cmd_t *zc, int flag)
7908 {
7909 int error, cmd;
7910 const zfs_ioc_vec_t *vec;
7911 char *saved_poolname = NULL;
7912 uint64_t max_nvlist_src_size;
7913 size_t saved_poolname_len = 0;
7914 nvlist_t *innvl = NULL;
7915 fstrans_cookie_t cookie;
7916 hrtime_t start_time = gethrtime();
7917
7918 cmd = vecnum;
7919 error = 0;
7920 if (vecnum >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))
7921 return (SET_ERROR(ZFS_ERR_IOC_CMD_UNAVAIL));
7922
7923 vec = &zfs_ioc_vec[vecnum];
7924
7925 	/*
7926 	 * The registered ioctl list may be sparse; verify that either
7927 	 * a normal or a legacy handler is registered.
7928 	 */
7929 if (vec->zvec_func == NULL && vec->zvec_legacy_func == NULL)
7930 return (SET_ERROR(ZFS_ERR_IOC_CMD_UNAVAIL));
7931
7932 zc->zc_iflags = flag & FKIOCTL;
7933 max_nvlist_src_size = zfs_max_nvlist_src_size_os();
7934 if (zc->zc_nvlist_src_size > max_nvlist_src_size) {
7935 /*
7936 * Make sure the user doesn't pass in an insane value for
7937 * zc_nvlist_src_size. We have to check, since we will end
7938 * up allocating that much memory inside of get_nvlist(). This
7939 * prevents a nefarious user from allocating tons of kernel
7940 * memory.
7941 *
7942 		 * Also, we return EINVAL instead of ENOMEM here, because
7943 		 * returning ENOMEM from an ioctl() has a special connotation:
7944 		 * the user's size value is too small and needs to be expanded
7945 		 * to hold the nvlist. See zcmd_expand_dst_nvlist() for
7946 		 * details.
7947 */
7948 error = SET_ERROR(EINVAL); /* User's size too big */
7949
7950 } else if (zc->zc_nvlist_src_size != 0) {
7951 error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
7952 zc->zc_iflags, &innvl);
7953 if (error != 0)
7954 goto out;
7955 }
7956
7957 /*
7958 * Ensure that all pool/dataset names are valid before we pass down to
7959 * the lower layers.
7960 */
7961 zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
7962 switch (vec->zvec_namecheck) {
7963 case POOL_NAME:
7964 if (pool_namecheck(zc->zc_name, NULL, NULL) != 0)
7965 error = SET_ERROR(EINVAL);
7966 else
7967 error = pool_status_check(zc->zc_name,
7968 vec->zvec_namecheck, vec->zvec_pool_check);
7969 break;
7970
7971 case DATASET_NAME:
7972 if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0)
7973 error = SET_ERROR(EINVAL);
7974 else
7975 error = pool_status_check(zc->zc_name,
7976 vec->zvec_namecheck, vec->zvec_pool_check);
7977 break;
7978
7979 case ENTITY_NAME:
7980 if (entity_namecheck(zc->zc_name, NULL, NULL) != 0) {
7981 error = SET_ERROR(EINVAL);
7982 } else {
7983 error = pool_status_check(zc->zc_name,
7984 vec->zvec_namecheck, vec->zvec_pool_check);
7985 }
7986 break;
7987
7988 case NO_NAME:
7989 break;
7990 }
7991 /*
7992 * Ensure that all input pairs are valid before we pass them down
7993 * to the lower layers.
7994 *
7995 * The vectored functions can use fnvlist_lookup_{type} for any
7996 * required pairs since zfs_check_input_nvpairs() confirmed that
7997 * they exist and are of the correct type.
7998 */
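	/*
	 * For example (hypothetical handler): a vectored func whose key
	 * table marks "target" as a required DATA_TYPE_STRING may call
	 * fnvlist_lookup_string(innvl, "target") without further checks,
	 * because zfs_check_input_nvpairs() rejects a missing or mistyped
	 * pair before the func is invoked.
	 */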
7999 if (error == 0 && vec->zvec_func != NULL) {
8000 error = zfs_check_input_nvpairs(innvl, vec);
8001 if (error != 0)
8002 goto out;
8003 }
8004
8005 if (error == 0) {
8006 cookie = spl_fstrans_mark();
8007 error = vec->zvec_secpolicy(zc, innvl, CRED());
8008 spl_fstrans_unmark(cookie);
8009 }
8010
8011 if (error != 0)
8012 goto out;
8013
8014 /* legacy ioctls can modify zc_name */
8015 /*
8016 * Can't use kmem_strdup() as we might truncate the string and
8017 * kmem_strfree() would then free with incorrect size.
8018 */
8019 saved_poolname_len = strlen(zc->zc_name) + 1;
8020 saved_poolname = kmem_alloc(saved_poolname_len, KM_SLEEP);
8021
8022 strlcpy(saved_poolname, zc->zc_name, saved_poolname_len);
8023 saved_poolname[strcspn(saved_poolname, "/@#")] = '\0';
8024
8025 if (vec->zvec_func != NULL) {
8026 nvlist_t *outnvl;
8027 int puterror = 0;
8028 spa_t *spa;
8029 nvlist_t *lognv = NULL;
8030
8031 ASSERT(vec->zvec_legacy_func == NULL);
8032
8033 /*
8034 * Add the innvl to the lognv before calling the func,
8035 * in case the func changes the innvl.
8036 */
8037 if (vec->zvec_allow_log) {
8038 lognv = fnvlist_alloc();
8039 fnvlist_add_string(lognv, ZPOOL_HIST_IOCTL,
8040 vec->zvec_name);
8041 if (!nvlist_empty(innvl)) {
8042 fnvlist_add_nvlist(lognv, ZPOOL_HIST_INPUT_NVL,
8043 innvl);
8044 }
8045 }
8046
8047 outnvl = fnvlist_alloc();
8048 cookie = spl_fstrans_mark();
8049 error = vec->zvec_func(zc->zc_name, innvl, outnvl);
8050 spl_fstrans_unmark(cookie);
8051
8052 /*
8053 * Some commands can partially execute, modify state, and still
8054 * return an error. In these cases, attempt to record what
8055 * was modified.
8056 */
8057 if ((error == 0 ||
8058 (cmd == ZFS_IOC_CHANNEL_PROGRAM && error != EINVAL)) &&
8059 vec->zvec_allow_log &&
8060 spa_open(zc->zc_name, &spa, FTAG) == 0) {
8061 if (!nvlist_empty(outnvl)) {
8062 size_t out_size = fnvlist_size(outnvl);
8063 if (out_size > zfs_history_output_max) {
8064 fnvlist_add_int64(lognv,
8065 ZPOOL_HIST_OUTPUT_SIZE, out_size);
8066 } else {
8067 fnvlist_add_nvlist(lognv,
8068 ZPOOL_HIST_OUTPUT_NVL, outnvl);
8069 }
8070 }
8071 if (error != 0) {
8072 fnvlist_add_int64(lognv, ZPOOL_HIST_ERRNO,
8073 error);
8074 }
8075 fnvlist_add_int64(lognv, ZPOOL_HIST_ELAPSED_NS,
8076 gethrtime() - start_time);
8077 (void) spa_history_log_nvl(spa, lognv);
8078 spa_close(spa, FTAG);
8079 }
8080 fnvlist_free(lognv);
8081
8082 if (!nvlist_empty(outnvl) || zc->zc_nvlist_dst_size != 0) {
8083 int smusherror = 0;
8084 if (vec->zvec_smush_outnvlist) {
8085 smusherror = nvlist_smush(outnvl,
8086 zc->zc_nvlist_dst_size);
8087 }
8088 if (smusherror == 0)
8089 puterror = put_nvlist(zc, outnvl);
8090 }
8091
8092 if (puterror != 0)
8093 error = puterror;
8094
8095 nvlist_free(outnvl);
8096 } else {
8097 cookie = spl_fstrans_mark();
8098 error = vec->zvec_legacy_func(zc);
8099 spl_fstrans_unmark(cookie);
8100 }
8101
8102 out:
8103 nvlist_free(innvl);
8104 if (error == 0 && vec->zvec_allow_log) {
8105 char *s = tsd_get(zfs_allow_log_key);
8106 if (s != NULL)
8107 kmem_strfree(s);
8108 (void) tsd_set(zfs_allow_log_key, kmem_strdup(saved_poolname));
8109 }
8110 if (saved_poolname != NULL)
8111 kmem_free(saved_poolname, saved_poolname_len);
8112
8113 return (error);
8114 }
8115
8116 int
8117 zfs_kmod_init(void)
8118 {
8119 int error;
8120
8121 if ((error = zvol_init()) != 0)
8122 return (error);
8123
8124 spa_init(SPA_MODE_READ | SPA_MODE_WRITE);
8125 zfs_init();
8126
8127 zfs_ioctl_init();
8128
8129 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
8130 zfsdev_state_listhead.zs_minor = -1;
8131
8132 if ((error = zfsdev_attach()) != 0)
8133 goto out;
8134
8135 tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
8136 tsd_create(&zfs_allow_log_key, zfs_allow_log_destroy);
8137
8138 return (0);
8139 out:
8140 zfs_fini();
8141 spa_fini();
8142 zvol_fini();
8143
8144 return (error);
8145 }
8146
8147 void
8148 zfs_kmod_fini(void)
8149 {
8150 zfsdev_state_t *zs, *zsnext = NULL;
8151
8152 zfsdev_detach();
8153
8154 mutex_destroy(&zfsdev_state_lock);
8155
8156 for (zs = &zfsdev_state_listhead; zs != NULL; zs = zsnext) {
8157 zsnext = zs->zs_next;
8158 if (zs->zs_onexit)
8159 zfs_onexit_destroy(zs->zs_onexit);
8160 if (zs->zs_zevent)
8161 zfs_zevent_destroy(zs->zs_zevent);
8162 if (zs != &zfsdev_state_listhead)
8163 kmem_free(zs, sizeof (zfsdev_state_t));
8164 }
8165
8166 zfs_ereport_taskq_fini(); /* run before zfs_fini() on Linux */
8167 zfs_fini();
8168 spa_fini();
8169 zvol_fini();
8170
8171 tsd_destroy(&rrw_tsd_key);
8172 tsd_destroy(&zfs_allow_log_key);
8173 }
8174
8175 ZFS_MODULE_PARAM(zfs, zfs_, max_nvlist_src_size, U64, ZMOD_RW,
8176 "Maximum size in bytes allowed for src nvlist passed with ZFS ioctls");
8177
8178 ZFS_MODULE_PARAM(zfs, zfs_, history_output_max, U64, ZMOD_RW,
8179 "Maximum size in bytes of ZFS ioctl output that will be logged");
8180