/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 Martin Matuska. All rights reserved.
 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <sys/zthr.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"

/*
 * Filesystem and Snapshot Limits
 * ------------------------------
 *
 * These limits are used to restrict the number of filesystems and/or snapshots
 * that can be created at a given level in the tree or below. A typical
 * use-case is with a delegated dataset where the administrator wants to ensure
 * that a user within the zone is not creating too many additional filesystems
 * or snapshots, even though they're not exceeding their space quota.
 *
 * The filesystem and snapshot counts are stored as extensible properties. This
 * capability is controlled by a feature flag and must be enabled to be used.
 * Once enabled, the feature is not active until the first limit is set. At
 * that point, future operations to create/destroy filesystems or snapshots
 * will validate and update the counts.
 *
 * Because the count properties will not exist before the feature is active,
 * the counts are updated when a limit is first set on an uninitialized
 * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
 * all of the nested filesystems/snapshots. Thus, a new leaf node has a
 * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
 * snapshot count properties on a node indicate uninitialized counts on that
 * node.) When first setting a limit on an uninitialized node, the code starts
 * at the filesystem with the new limit and descends into all sub-filesystems
 * to add the count properties.
 *
 * In practice this is lightweight since a limit is typically set when the
 * filesystem is created and thus has no children. Once valid, changing the
 * limit value won't require a re-traversal since the counts are already valid.
 * When recursively fixing the counts, if a node with a limit is encountered
 * during the descent, the counts are known to be valid and there is no need to
 * descend into that filesystem's children. The counts on filesystems above the
 * one with the new limit will still be uninitialized, unless a limit is
 * eventually set on one of those filesystems. The counts are always recursively
 * updated when a limit is set on a dataset, unless there is already a limit.
 * When a new limit value is set on a filesystem with an existing limit, it is
 * possible for the new limit to be less than the current count at that level
 * since a user who can change the limit is also allowed to exceed the limit.
 *
 * Once the feature is active, then whenever a filesystem or snapshot is
 * created, the code recurses up the tree, validating the new count against the
 * limit at each initialized level. In practice, most levels will not have a
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
 * the initialized nodes in the tree. Renaming a filesystem to a different
 * point in the tree will first validate, then update the counts on each branch
 * up to the common ancestor. A receive will also validate the counts and then
 * update them.
 *
 * An exception to the above behavior is that the limit is not enforced if the
 * user has permission to modify the limit. This is primarily so that
 * recursive snapshots in the global zone always work. We want to prevent a
 * denial-of-service in which a lower level delegated dataset could max out its
 * limit and thus block recursive snapshots from being taken in the global zone.
 * Because of this, it is possible for the snapshot count to be over the limit
 * and snapshots taken in the global zone could cause a lower level dataset to
 * hit or exceed its limit. The administrator taking the global zone recursive
 * snapshot should be aware of this side-effect and behave accordingly.
 * For consistency, the filesystem limit is also not enforced if the user can
 * modify the limit.
 *
 * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
 * and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
 * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
 * dsl_dir_init_fs_ss_count().
 */

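/*
 * Illustrative example (administrative view; not part of the mechanism
 * itself): after "zfs set filesystem_limit=3 pool/delegated", the
 * SPA_FEATURE_FS_SS_LIMIT feature becomes active, the counts are
 * initialized on pool/delegated and everything below it, and a fourth
 * "zfs create" under pool/delegated by a user who cannot write the
 * property fails with EDQUOT.  A user who can write the property (e.g.
 * root in the global zone) may still exceed the limit, as described
 * above.
 */
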
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);

typedef struct ddulrt_arg {
	dsl_dir_t	*ddulrta_dd;
	uint64_t	ddlrta_txg;
} ddulrt_arg_t;

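/*
 * Dbuf user-eviction callback: tear down the in-core dsl_dir_t once the
 * dbuf holding it is evicted, releasing the async holds on the parent
 * dir and the spa that were taken when the dsl_dir_t was instantiated.
 */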
static void
dsl_dir_evict_async(void *dbu)
{
	dsl_dir_t *dd = dbu;
	int t;
	dsl_pool_t *dp __maybe_unused = dd->dd_pool;

	dd->dd_dbuf = NULL;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_async_rele(dd->dd_parent, dd);

	spa_async_close(dd->dd_pool->dp_spa, dd);

	if (dsl_deadlist_is_open(&dd->dd_livelist))
		dsl_dir_livelist_close(dd);

	dsl_prop_fini(dd);
	cv_destroy(&dd->dd_activity_cv);
	mutex_destroy(&dd->dd_activity_lock);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

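/*
 * Hold the dsl_dir_t for object number "ddobj", instantiating it from
 * its bonus buffer on first hold.  "tail", if non-NULL, is this dir's
 * name within its parent's child ZAP and is used to fill in dd_myname.
 * On success the caller holds both the dir and the spa; release with
 * dsl_dir_rele().
 */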
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, const void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	dmu_object_info_t doi;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err != 0)
		return (err);
	dd = dmu_buf_get_user(dbuf);

	dmu_object_info_from_db(dbuf, &doi);
	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));

	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;

		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
		dsl_prop_init(dd);

		if (dsl_dir_is_zapified(dd)) {
			err = zap_lookup(dp->dp_meta_objset,
			    ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
			    sizeof (uint64_t), 1, &dd->dd_crypto_obj);
			if (err == 0) {
				/* check for on-disk format errata */
				if (dsl_dir_incompatible_encryption_version(
				    dd)) {
					dp->dp_spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
				}
			} else if (err != ENOENT) {
				goto errout;
			}
		}

		if (dsl_dir_phys(dd)->dd_parent_obj) {
			err = dsl_dir_hold_obj(dp,
			    dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
			    &dd->dd_parent);
			if (err != 0)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj, tail,
				    sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strlcpy(dd->dd_myname, tail,
				    sizeof (dd->dd_myname));
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err != 0)
				goto errout;
		} else {
			(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
			    sizeof (dd->dd_myname));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dsl_dir_phys(dd)->dd_origin_obj, FTAG,
			    &origin_bonus);
			if (err != 0)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
			if (dsl_dir_is_zapified(dd)) {
				uint64_t obj;
				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_object, DD_FIELD_LIVELIST,
				    sizeof (uint64_t), 1, &obj);
				if (err == 0)
					dsl_dir_livelist_open(dd, obj);
				else if (err != ENOENT)
					goto errout;
			}
		}

		if (dsl_dir_is_zapified(dd)) {
			inode_timespec_t t = {0};
			(void) zap_lookup(dp->dp_meta_objset, ddobj,
			    DD_FIELD_SNAPSHOTS_CHANGED,
			    sizeof (uint64_t),
			    sizeof (inode_timespec_t) / sizeof (uint64_t),
			    &t);
			dd->dd_snap_cmtime = t;
		}

		dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
		    &dd->dd_dbuf);
		winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
		if (winner != NULL) {
			if (dd->dd_parent)
				dsl_dir_rele(dd->dd_parent, dd);
			if (dsl_deadlist_is_open(&dd->dd_livelist))
				dsl_dir_livelist_close(dd);
			dsl_prop_fini(dd);
			cv_destroy(&dd->dd_activity_cv);
			mutex_destroy(&dd->dd_activity_lock);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);
	if (dsl_deadlist_is_open(&dd->dd_livelist))
		dsl_dir_livelist_close(dd);
	dsl_prop_fini(dd);
	cv_destroy(&dd->dd_activity_cv);
	mutex_destroy(&dd->dd_activity_lock);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}

void
dsl_dir_rele(dsl_dir_t *dd, const void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/*
 * Remove a reference to the given dsl dir that is being asynchronously
 * released.  Async releases occur from a taskq performing eviction of
 * dsl datasets and dirs.  This process is identical to a normal release
 * with the exception of using the async API for releasing the reference on
 * the spa.
 */
void
dsl_dir_async_rele(dsl_dir_t *dd, const void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_async_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
		    ZFS_MAX_DATASET_NAME_LEN);
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
		    <, ZFS_MAX_DATASET_NAME_LEN);
		mutex_exit(&dd->dd_lock);
	} else {
		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
		    <, ZFS_MAX_DATASET_NAME_LEN);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

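/*
 * Copy the next component of "path" (terminated by '/', '@', or the end
 * of the string) into "component", enforcing the separator and length
 * rules for dataset names, and set *nextp to the remainder of the path
 * (pointing at the '@' when the next component is a snapshot name), or
 * to NULL if this was the last component.
 */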
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (SET_ERROR(ENOENT));
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (SET_ERROR(EINVAL));
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (SET_ERROR(EINVAL));
		if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
		p = NULL;
	} else if (p[0] == '/') {
		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strlcpy(component, path, p - path + 1);
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (SET_ERROR(EINVAL));
		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strlcpy(component, path, p - path + 1);
	} else {
		panic("invalid p=%p", (void *)p);
	}
	*nextp = p;
	return (0);
}

/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  The name must be in the specified dsl_pool_t.  This
 * thread must hold the dp_config_rwlock for the pool.  Returns ENOENT if
 * the path is bogus, or if tail==NULL and we couldn't parse the whole
 * name.  (*tail)[0] == '@' means that the last component is a snapshot.
 */
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char *buf;
	const char *spaname, *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	uint64_t ddobj;

	buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	err = getcomponent(name, buf, &next);
	if (err != 0)
		goto error;

	/* Make sure the name is in the specified pool. */
	spaname = spa_name(dp->dp_spa);
	if (strcmp(buf, spaname) != 0) {
		err = SET_ERROR(EXDEV);
		goto error;
	}

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err != 0) {
		goto error;
	}

	while (next != NULL) {
		dsl_dir_t *child_dd;
		err = getcomponent(next, buf, &nextnext);
		if (err != 0)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err != 0) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
		if (err != 0)
			break;
		dsl_dir_rele(dd, tag);
		dd = child_dd;
		next = nextnext;
	}

	if (err != 0) {
		dsl_dir_rele(dd, tag);
		goto error;
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_rele(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = SET_ERROR(ENOENT);
	}
	if (tailp != NULL)
		*tailp = next;
	if (err == 0)
		*ddp = dd;
error:
	kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
	return (err);
}

/*
 * If the counts are already initialized for this filesystem and its
 * descendants then do nothing, otherwise initialize the counts.
 *
 * The counts on this filesystem, and those below, may be uninitialized due to
 * either the use of a pre-existing pool which did not support the
 * filesystem/snapshot limit feature, or one in which the feature had not yet
 * been enabled.
 *
 * Recursively descend the filesystem tree and update the filesystem/snapshot
 * counts on each filesystem below, then update the cumulative count on the
 * current filesystem. If the filesystem already has a count set on it,
 * then we know that its counts, and the counts on the filesystems below it,
 * are already correct, so we don't have to update this filesystem.
 */
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
	uint64_t my_fs_cnt = 0;
	uint64_t my_ss_cnt = 0;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *os = dp->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;
	dsl_dataset_t *ds;

	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
	ASSERT(dsl_pool_config_held(dp));
	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_zapify(dd, tx);

	/*
	 * If the filesystem count has already been initialized then we
	 * don't need to recurse down any further.
	 */
	if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/* Iterate my child dirs */
	for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
	    zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
		dsl_dir_t *chld_dd;
		uint64_t count;

		VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
		    &chld_dd));

		/*
		 * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
		 */
		if (chld_dd->dd_myname[0] == '$') {
			dsl_dir_rele(chld_dd, FTAG);
			continue;
		}

		my_fs_cnt++;	/* count this child */

		dsl_dir_init_fs_ss_count(chld_dd, tx);

		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
		my_fs_cnt += count;
		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
		my_ss_cnt += count;

		dsl_dir_rele(chld_dd, FTAG);
	}
	zap_cursor_fini(zc);
	/* Count my snapshots (we counted children's snapshots above) */
	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
	    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));

	for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		/* Don't count temporary snapshots */
		if (za->za_name[0] != '%')
			my_ss_cnt++;
	}
	zap_cursor_fini(zc);

	dsl_dataset_rele(ds, FTAG);

	kmem_free(zc, sizeof (zap_cursor_t));
	kmem_free(za, sizeof (zap_attribute_t));

	/* we're in a sync task, update counts */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
	    sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
	    sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}

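/*
 * Sync-task callbacks for dsl_dir_activate_fs_ss_limit(): the check
 * fails with ENOTSUP if the fs_ss_limit feature is not enabled, and
 * with EALREADY (swallowed by the caller) if this dir's counts are
 * already initialized; the sync activates the feature if needed and
 * initializes the counts.
 */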
static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	int error;

	error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
	if (error != 0)
		return (error);

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	dd = ds->ds_dir;
	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
	    dsl_dir_is_zapified(dd) &&
	    zap_contains(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EALREADY));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	spa_t *spa;

	VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));

	spa = dsl_dataset_get_spa(ds);

	if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
		/*
		 * Since the feature was not active and we're now setting a
		 * limit, increment the feature-active counter so that the
		 * feature becomes active for the first time.
		 *
		 * We are already in a sync task so we can update the MOS.
		 */
		spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
	}

	/*
	 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
	 * we need to ensure the counts are correct. Descend down the tree from
	 * this point and update all of the counts to be accurate.
	 */
	dsl_dir_init_fs_ss_count(ds->ds_dir, tx);

	dsl_dataset_rele(ds, FTAG);
}

/*
 * Make sure the feature is enabled and activate it if necessary.
 * Since we're setting a limit, ensure the on-disk counts are valid.
 * This is only called by the ioctl path when setting a limit value.
 *
 * We do not need to validate the new limit, since users who can change the
 * limit are also allowed to exceed the limit.
 */
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
	int error;

	error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
	    dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
	    ZFS_SPACE_CHECK_RESERVED);

	if (error == EALREADY)
		error = 0;

	return (error);
}

/*
 * Used to determine if the filesystem_limit or snapshot_limit should be
 * enforced. We allow the limit to be exceeded if the user has permission to
 * write the property value. We pass in the creds that we got in the open
 * context since we will always be the GZ root in syncing context. We also have
 * to handle the case where we are allowed to change the limit on the current
 * dataset, but there may be another limit in the tree above.
 *
 * We can never modify these two properties within a non-global zone. In
 * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
 * can't use that function since we are already holding the dp_config_rwlock.
 * In addition, we already have the dd and dealing with snapshots is simplified
 * in this code.
 */

typedef enum {
	ENFORCE_ALWAYS,		/* enforce the limit at this level */
	ENFORCE_NEVER,		/* user may change the limit; don't enforce */
	ENFORCE_ABOVE		/* skip this level, but check ancestors */
} enforce_res_t;

static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
    cred_t *cr, proc_t *proc)
{
	enforce_res_t enforce = ENFORCE_ALWAYS;
	uint64_t obj;
	dsl_dataset_t *ds;
	uint64_t zoned;
	const char *zonedstr;

	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

#ifdef _KERNEL
	if (crgetzoneid(cr) != GLOBAL_ZONEID)
		return (ENFORCE_ALWAYS);

	/*
	 * We are checking the saved credentials of the user process, which is
	 * not the current process.  Note that we can't use secpolicy_zfs(),
	 * because it only works if the cred is that of the current process (on
	 * Linux).
	 */
	if (secpolicy_zfs_proc(cr, proc) == 0)
		return (ENFORCE_NEVER);
#else
	(void) proc;
#endif

	if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
		return (ENFORCE_ALWAYS);

	ASSERT(dsl_pool_config_held(dd->dd_pool));

	if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
		return (ENFORCE_ALWAYS);

	zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
	if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
		/* Only root can access zoned fs's from the GZ */
		enforce = ENFORCE_ALWAYS;
	} else {
		if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
			enforce = ENFORCE_ABOVE;
	}

	dsl_dataset_rele(ds, FTAG);
	return (enforce);
}

/*
 * Check if adding additional child filesystem(s) would exceed any filesystem
 * limits or adding additional snapshot(s) would exceed any snapshot limits.
 * The prop argument indicates which limit to check.
 *
 * Note that all filesystem limits up to the root (or the highest
 * initialized) filesystem or the given ancestor must be satisfied.
 */
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
    dsl_dir_t *ancestor, cred_t *cr, proc_t *proc)
{
	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t limit, count;
	const char *count_prop;
	enforce_res_t enforce;
	int err = 0;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

	/*
	 * If we're allowed to change the limit, don't enforce the limit,
	 * e.g. this can happen if a snapshot is taken by an administrative
	 * user in the global zone (i.e. a recursive snapshot by root).
	 * However, we must handle the case of delegated permissions where we
	 * are allowed to change the limit on the current dataset, but there
	 * is another limit in the tree above.
	 */
	enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc);
	if (enforce == ENFORCE_NEVER)
		return (0);

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment
	 * is 0.
	 */
	if (delta == 0)
		return (0);

	if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
		/*
		 * We don't enforce the limit for temporary snapshots. This is
		 * indicated by a NULL cred_t argument.
		 */
		if (cr == NULL)
			return (0);

		count_prop = DD_FIELD_SNAPSHOT_COUNT;
	} else {
		count_prop = DD_FIELD_FILESYSTEM_COUNT;
	}

	/*
	 * If an ancestor has been provided, stop checking the limit once we
	 * hit that dir. We need this during rename so that we don't overcount
	 * the check once we recurse up to the common ancestor.
	 */
	if (ancestor == dd)
		return (0);

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know there is no limit here (or above). The counts are
	 * not valid on this node and we know we won't touch this node's counts.
	 */
	if (!dsl_dir_is_zapified(dd))
		return (0);
	err = zap_lookup(os, dd->dd_object,
	    count_prop, sizeof (count), 1, &count);
	if (err == ENOENT)
		return (0);
	if (err != 0)
		return (err);

	err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
	    B_FALSE);
	if (err != 0)
		return (err);

	/* Is there a limit which we've hit? */
	if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
		return (SET_ERROR(EDQUOT));

	if (dd->dd_parent != NULL)
		err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
		    ancestor, cr, proc);

	return (err);
}

/*
 * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
 * parents. When a new filesystem/snapshot is created, increment the count on
 * all parents, and when a filesystem/snapshot is destroyed, decrement the
 * count.
 */
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
    dmu_tx_t *tx)
{
	int err;
	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t count;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
	    strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);

	/*
	 * We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
	 */
	if (dd->dd_myname[0] == '$' && strcmp(prop,
	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
		return;
	}

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
	 */
	if (delta == 0)
		return;

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know the counts are not valid on this node and we
	 * know we shouldn't touch this node's counts. An uninitialized count
	 * on the node indicates that either the feature has not yet been
	 * activated or there are no limits on this part of the tree.
	 */
	if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
	    prop, sizeof (count), 1, &count)) == ENOENT)
		return;
	VERIFY0(err);

	count += delta;
	/* Use a signed verify to make sure we're not negative. */
	VERIFY3S(count, >=, 0);

	VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
	    tx));

	/* Roll up this additional count into our ancestors */
	if (dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}

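/*
 * Allocate a new dsl_dir object, link it into its parent's child ZAP
 * (or record it as the pool's root dataset when "pds" is NULL), bump
 * the parent's filesystem count, initialize its props and child ZAPs,
 * and return the new object number.
 */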
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds) {
		ddphys->dd_parent_obj = pds->dd_object;

		/* update the filesystem counts */
		dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
	}
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;

	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dsl_dir_phys(dd)->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}

uint64_t
dsl_dir_get_used(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_bytes);
}

uint64_t
dsl_dir_get_compressed(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_compressed_bytes);
}

uint64_t
dsl_dir_get_quota(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_quota);
}

uint64_t
dsl_dir_get_reservation(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_reserved);
}

uint64_t
dsl_dir_get_compressratio(dsl_dir_t *dd)
{
	/* a fixed point number, 100x the ratio */
	return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
	    (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
	    dsl_dir_phys(dd)->dd_compressed_bytes));
}

uint64_t
dsl_dir_get_logicalused(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
}

uint64_t
dsl_dir_get_usedsnap(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
}

uint64_t
dsl_dir_get_usedds(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
}

uint64_t
dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
}

uint64_t
dsl_dir_get_usedchild(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
	    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
}

void
dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
{
	dsl_dataset_t *ds;
	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
	    dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));

	dsl_dataset_name(ds, buf);

	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
{
	if (dsl_dir_is_zapified(dd)) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
		    sizeof (*count), 1, count));
	} else {
		return (SET_ERROR(ENOENT));
	}
}

int
dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
{
	if (dsl_dir_is_zapified(dd)) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
		    sizeof (*count), 1, count));
	} else {
		return (SET_ERROR(ENOENT));
	}
}

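/*
 * Fill in "nv" with this dir's quota, reservation, and space-usage
 * properties, plus the filesystem/snapshot counts and the clone origin
 * when they are present.
 */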
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
	    dsl_dir_get_quota(dd));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dsl_dir_get_reservation(dd));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
	    dsl_dir_get_logicalused(dd));
	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dsl_dir_get_usedsnap(dd));
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dsl_dir_get_usedds(dd));
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dsl_dir_get_usedrefreserv(dd));
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dsl_dir_get_usedchild(dd));
	}
	mutex_exit(&dd->dd_lock);

	uint64_t count;
	if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
		    count);
	}
	if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
		    count);
	}

	if (dsl_dir_is_clone(dd)) {
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		dsl_dir_get_origin(dd, buf);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
}

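/*
 * Add this dir to the pool's dirty list for tx's txg; the extra dbuf
 * hold keeps the dsl_dir_t alive until dsl_dir_sync() releases it.
 */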
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dsl_dir_phys(dd));

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

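/*
 * Space charged to our parent changes only once usage rises above the
 * reservation floor: return the change in MAX(used, dd_reserved) when
 * this dir's usage moves by "delta".  E.g. with dd_reserved=10M, going
 * from used=4M to 6M charges the parent nothing, while going from 9M
 * to 11M charges it 1M.
 */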
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
	uint64_t new_accounted =
	    MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
	return (new_accounted - old_accounted);
}

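/*
 * Called in syncing context once this dir's dirty data for the txg has
 * been processed: clear the txg's dd_space_towrite estimate and drop
 * the dbuf hold taken by dsl_dir_dirty().
 */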
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
	    (u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (int i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i & TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dsl_dir_phys(dd)->dd_quota != 0)
		quota = dsl_dir_phys(dd)->dd_quota;
	used = dsl_dir_phys(dd)->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
		    ZFS_SPACE_CHECK_NORMAL);
		quota = MIN(quota, poolsize);
	}

	if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dsl_dir_phys(dd)->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

struct tempreserve {
	list_node_t tr_node;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

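/*
 * Try to reserve "asize" bytes in this dir for tx's txg, checking the
 * dir's quota (and, at the root, the pool's usable space) at each level
 * up to the root.  Each level reserved is appended to tr_list so the
 * caller can undo it with dsl_dir_tempreserve_clear().  Written
 * iteratively (see top_of_function) to bound stack usage.
 */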
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg;
	uint64_t quota;
	struct tempreserve *tr;
	int retval;
	uint64_t ref_rsrv;

top_of_function:
	txg = tx->tx_txg;
	retval = EDQUOT;
	ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	uint64_t est_inflight = dsl_dir_space_towrite(dd);
	for (int i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, !netfree,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error != 0) {
			mutex_exit(&dd->dd_lock);
			DMU_TX_STAT_BUMP(dmu_tx_quota);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dsl_dir_phys(dd)->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
		    (netfree) ?
		    ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);

		if (avail < quota) {
			quota = avail;
			retval = SET_ERROR(ENOSPC);
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes
	 * or deferred frees (which may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota) {
			retval = SET_ERROR(ERESTART);
		} else {
			ASSERT3U(used_on_disk, >=, quota);

			if (retval == ENOSPC && (used_on_disk - quota) <
			    dsl_pool_deferred_space(dd->dd_pool)) {
				retval = SET_ERROR(ERESTART);
			}
		}

		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    (u_longlong_t)used_on_disk>>10,
		    (u_longlong_t)est_inflight>>10,
		    (u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		DMU_TX_STAT_BUMP(dmu_tx_quota);
		return (retval);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txg & TXG_MASK] += asize;

	uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent != NULL && parent_rsrv != 0) {
		/*
		 * Recurse on our parent without recursion: true recursion
		 * here has been observed to use a lot of stack, even within
		 * the test suite; the largest stack seen was 7632 bytes on
		 * Linux.
		 */

		dd = dd->dd_parent;
		asize = parent_rsrv;
		ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
		first = B_FALSE;
		goto top_of_function;

	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);

	err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);
	} else {
		if (err == EAGAIN) {
			/*
			 * If arc_memory_throttle() detected that pageout
			 * is running and we are low on memory, we delay new
			 * non-pageout transactions to give pageout an
			 * advantage.
			 *
			 * It is unfortunate to be delaying while the caller's
			 * locks are held.
			 */
			txg_delay(dd->dd_pool, tx->tx_txg,
			    MSEC2NSEC(10), MSEC2NSEC(10));
			err = SET_ERROR(ERESTART);
		}
	}

	if (err == 0) {
		err = dsl_dir_tempreserve_impl(dd, asize, netfree,
		    B_FALSE, tr_list, tx, B_TRUE);
	}

	if (err != 0)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while ((tr = list_head(tr_list)) != NULL) {
		if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

/*
 * This should be called from open context when we think we're going to write
 * or free space, for example when dirtying data. Be conservative; it's okay
 * to write less space or free more, but we don't want to write more or free
 * less than the amount specified.
 *
 * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
 * version; however, it has been adjusted to use an iterative rather than
 * recursive algorithm to minimize stack usage.
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	do {
		mutex_enter(&dd->dd_lock);
		if (space > 0)
			dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

		est_used = dsl_dir_space_towrite(dd) +
		    dsl_dir_phys(dd)->dd_used_bytes;
		parent_space = parent_delta(dd, est_used, space);
		mutex_exit(&dd->dd_lock);

		/* Make sure that we clean up dd_space_to* */
		dsl_dir_dirty(dd, tx);

		dd = dd->dd_parent;
		space = parent_space;
	} while (space && dd);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/*
	 * dsl_dataset_set_refreservation_sync_impl() calls this with
	 * dd_lock held, so that it can atomically update
	 * ds->ds_reserved and the dsl_dir accounting, so that
	 * dsl_dataset_check_quota() can see dataset and dir accounting
	 * consistently.
	 */
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
	if (needlock)
		mutex_enter(&dd->dd_lock);
	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
	accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
	ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    ddp->dd_uncompressed_bytes >= -uncompressed);
	ddp->dd_used_bytes += used;
	ddp->dd_uncompressed_bytes += uncompressed;
	ddp->dd_compressed_bytes += compressed;

	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
		ddp->dd_used_breakdown[type] += used;
#ifdef ZFS_DEBUG
		{
			dd_used_t t;
			uint64_t u = 0;
			for (t = 0; t < DD_USED_NUM; t++)
				u += ddp->dd_used_breakdown[t];
			ASSERT3U(u, ==, ddp->dd_used_bytes);
		}
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_transfer_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed,
		    used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

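/*
 * Move "delta" bytes of this dir's used-space breakdown from "oldtype"
 * to "newtype"; the dir's total usage is unchanged.
 */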
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
	if (delta == 0 ||
	    !(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    ddp->dd_used_breakdown[oldtype] >= delta :
	    ddp->dd_used_breakdown[newtype] >= -delta);
	ASSERT(ddp->dd_used_bytes >= ABS(delta));
	ddp->dd_used_breakdown[oldtype] -= delta;
	ddp->dd_used_breakdown[newtype] += delta;
	mutex_exit(&dd->dd_lock);
}

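/*
 * Combination of dsl_dir_diduse_space() and dsl_dir_transfer_space():
 * apply the used/compressed/uncompressed deltas and, in the same pass,
 * shift "tonew" bytes of the used-space breakdown from "oldtype" to
 * "newtype", then roll the accounted delta up into the parent's
 * DD_USED_CHILD* categories.
 */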
void
dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
    int64_t compressed, int64_t uncompressed, int64_t tonew,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
	accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
	ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    ddp->dd_uncompressed_bytes >= -uncompressed);
	ddp->dd_used_bytes += used;
	ddp->dd_uncompressed_bytes += uncompressed;
	ddp->dd_compressed_bytes += compressed;

	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(tonew - used <= 0 ||
		    ddp->dd_used_breakdown[oldtype] >= tonew - used);
		ASSERT(tonew >= 0 ||
		    ddp->dd_used_breakdown[newtype] >= -tonew);
		ddp->dd_used_breakdown[oldtype] -= tonew - used;
		ddp->dd_used_breakdown[newtype] += tonew;
#ifdef ZFS_DEBUG
		{
			dd_used_t t;
			uint64_t u = 0;
			for (t = 0; t < DD_USED_NUM; t++)
				u += ddp->dd_used_breakdown[t];
			ASSERT3U(u, ==, ddp->dd_used_bytes);
		}
#endif
	}
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_transfer_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed,
		    used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;

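/*
 * Sync-task callbacks for dsl_dir_set_quota(): the check predicts the
 * new quota value and fails with ENOSPC if it would fall below the
 * dir's reservation or its current usage plus pending writes.
 */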
1661 static int
1662 dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
1663 {
1664 	dsl_dir_set_qr_arg_t *ddsqra = arg;
1665 	dsl_pool_t *dp = dmu_tx_pool(tx);
1666 	dsl_dataset_t *ds;
1667 	int error;
1668 	uint64_t towrite, newval;
1669 
1670 	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
1671 	if (error != 0)
1672 		return (error);
1673 
1674 	error = dsl_prop_predict(ds->ds_dir, "quota",
1675 	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
1676 	if (error != 0) {
1677 		dsl_dataset_rele(ds, FTAG);
1678 		return (error);
1679 	}
1680 
1681 	if (newval == 0) {
1682 		dsl_dataset_rele(ds, FTAG);
1683 		return (0);
1684 	}
1685 
1686 	mutex_enter(&ds->ds_dir->dd_lock);
1687 	/*
1688 	 * If we are doing the preliminary check in open context, and
1689 	 * there are pending changes, then don't fail it, since the
1690 	 * pending changes could under-estimate the amount of space to be
1691 	 * freed up.
1692 	 */
1693 	towrite = dsl_dir_space_towrite(ds->ds_dir);
1694 	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
1695 	    (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
1696 	    newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
1697 		error = SET_ERROR(ENOSPC);
1698 	}
1699 	mutex_exit(&ds->ds_dir->dd_lock);
1700 	dsl_dataset_rele(ds, FTAG);
1701 	return (error);
1702 }
1703 
static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
	}

	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
}

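/*
 * User-level entry point: package the arguments and run the quota
 * check/sync functions as a single sync task.
 */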
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = quota;

	return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

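/*
 * Sync-task check function for "zfs set reservation". Only meaningful
 * in syncing context (the open-context pass is skipped because its
 * space estimates may be inaccurate). Computes how much additional
 * space the new reservation would charge to the parent and fails with
 * ENOSPC if that delta exceeds the space available, or if the new
 * reservation would exceed this dir's own quota.
 */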
static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	uint64_t newval, used, avail;
	int error;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);
	dd = ds->ds_dir;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_RESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool,
		    ZFS_SPACE_CHECK_NORMAL) - used;
	}

	if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
		uint64_t delta = MAX(used, newval) -
		    MAX(used, dsl_dir_phys(dd)->dd_reserved);

		if (delta > avail ||
		    (dsl_dir_phys(dd)->dd_quota > 0 &&
		    newval > dsl_dir_phys(dd)->dd_quota))
			error = SET_ERROR(ENOSPC);
	}

	dsl_dataset_rele(ds, FTAG);
	return (error);
}

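/*
 * Apply a new reservation value and roll the resulting change in
 * charged space up to our ancestors as DD_USED_CHILD_RSRV. Only the
 * portion of the reservation not already covered by actual usage is
 * charged to the parent. For example (values illustrative): with
 * used = 10G, old reservation = 2G, and a new reservation of 15G,
 * delta = MAX(10G, 15G) - MAX(10G, 2G) = 5G is newly charged to the
 * parent; shrinking the reservation back to 2G would charge -5G.
 */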
void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
	dsl_dir_phys(dd)->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}

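/*
 * Sync-task apply function for "zfs set reservation"; mirrors
 * dsl_dir_set_quota_sync() but defers the on-disk update to
 * dsl_dir_set_reservation_sync_impl() above.
 */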
static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    (longlong_t)newval);
	}

	dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = reservation;

	return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

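/*
 * Walk every ancestor of ds1 (including ds1 itself) looking for it in
 * ds2's ancestor chain; the first match is the closest common ancestor.
 * Returns NULL only if the two dirs are in disjoint trees, which should
 * not happen for dirs within the same pool.
 */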
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

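/*
 * Arguments for a rename sync task, including the credentials and
 * process used to re-check delegated filesystem/snapshot limits at the
 * new location.
 */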
typedef struct dsl_dir_rename_arg {
	const char *ddra_oldname;
	const char *ddra_newname;
	cred_t *ddra_cred;
	proc_t *ddra_proc;
} dsl_dir_rename_arg_t;

typedef struct dsl_valid_rename_arg {
	int char_delta;
	int nest_delta;
} dsl_valid_rename_arg_t;

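/*
 * dmu_objset_find_dp() callback used by dsl_dir_rename_check(): given
 * the change in name length (char_delta) and nesting depth (nest_delta)
 * that the rename would introduce, verify that each descendant's name
 * would still fit within ZFS_MAX_DATASET_NAME_LEN and
 * zfs_max_dataset_nesting.
 */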
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	(void) dp;
	dsl_valid_rename_arg_t *dvra = arg;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	dsl_dataset_name(ds, namebuf);

	ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN),
	    <, ZFS_MAX_DATASET_NAME_LEN);
	int namelen = strlen(namebuf) + dvra->char_delta;
	int depth = get_dataset_depth(namebuf) + dvra->nest_delta;

	if (namelen >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting)
		return (SET_ERROR(ENAMETOOLONG));
	return (0);
}

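/*
 * Sync-task check function for renaming a dsl_dir. Verifies that the
 * source exists, the destination does not, both are in the same pool,
 * the new parent is a ZFS filesystem, descendant names will still fit,
 * and (when reparenting) that encryption, loop-prevention, space, and
 * filesystem/snapshot-limit requirements are all satisfied.
 */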
static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	dsl_valid_rename_arg_t dvra;
	dsl_dataset_t *parentds;
	objset_t *parentos;
	const char *mynewname;
	int error;

	/* target dir should exist */
	error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	/* new parent should exist */
	error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
	    &newparent, &mynewname);
	if (error != 0) {
		dsl_dir_rele(dd, FTAG);
		return (error);
	}

	/* can't rename to different pool */
	if (dd->dd_pool != newparent->dd_pool) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/* new name should not already exist */
	if (mynewname == NULL) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	/* can't rename below anything but filesystems (e.g. no ZVOLs) */
	error = dsl_dataset_hold_obj(newparent->dd_pool,
	    dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds);
	if (error != 0) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (error);
	}
	error = dmu_objset_from_ds(parentds, &parentos);
	if (error != 0) {
		dsl_dataset_rele(parentds, FTAG);
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (error);
	}
	if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
		dsl_dataset_rele(parentds, FTAG);
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
	}
	dsl_dataset_rele(parentds, FTAG);

	ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN),
	    <, ZFS_MAX_DATASET_NAME_LEN);
	ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN),
	    <, ZFS_MAX_DATASET_NAME_LEN);
	dvra.char_delta = strlen(ddra->ddra_newname)
	    - strlen(ddra->ddra_oldname);
	dvra.nest_delta = get_dataset_depth(ddra->ddra_newname)
	    - get_dataset_depth(ddra->ddra_oldname);

	/* if the name length is growing, validate child name lengths */
	if (dvra.char_delta > 0 || dvra.nest_delta > 0) {
		error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
		    &dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	if (dmu_tx_is_syncing(tx)) {
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			/*
			 * Although this is the check function and we don't
			 * normally make on-disk changes in check functions,
			 * we need to do so here.
			 *
			 * Ensure that the counts in this portion of the tree
			 * have been initialized, in case the new parent has
			 * limits set.
			 */
			dsl_dir_init_fs_ss_count(dd, tx);
		}
	}

	if (newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_reserved);
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		if (dsl_dir_is_zapified(dd)) {
			int err;

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
			    &fs_cnt);
			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);
				return (err);
			}

			/*
			 * have to add 1 for the filesystem itself that we're
			 * moving
			 */
			fs_cnt++;

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
			    &ss_cnt);
			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);
				return (err);
			}
		}

		/* check for encryption errors */
		error = dsl_dir_rename_crypt_check(dd, newparent);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EACCES));
		}

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, newparent) == dd) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = dsl_dir_transfer_possible(dd->dd_parent,
		    newparent, fs_cnt, ss_cnt, myspace,
		    ddra->ddra_cred, ddra->ddra_proc);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
	return (0);
}

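/*
 * Sync-task apply function for renaming a dsl_dir. When moving to a new
 * parent, transfer the filesystem/snapshot counts, the used space, and
 * any unused reservation from the old parent to the new one, then move
 * the child entry between the parents' child-dir ZAP objects and update
 * dd_parent_obj.
 */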
static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;
	objset_t *mos = dp->dp_meta_objset;

	VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
	VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
	    &mynewname));

	/* Log this before we change the name. */
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s", ddra->ddra_newname);

	if (newparent != dd->dd_parent) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		/*
		 * We already made sure the dd counts were initialized in the
		 * check function.
		 */
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
			    &fs_cnt));
			/* add 1 for the filesystem itself that we're moving */
			fs_cnt++;

			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
			    &ss_cnt));
		}

		dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

		dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dsl_dir_phys(dd)->dd_used_bytes,
		    -dsl_dir_phys(dd)->dd_compressed_bytes,
		    -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(newparent, DD_USED_CHILD,
		    dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_compressed_bytes,
		    dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);

		if (dsl_dir_phys(dd)->dd_reserved >
		    dsl_dir_phys(dd)->dd_used_bytes) {
			uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
			    dsl_dir_phys(dd)->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	(void) strlcpy(dd->dd_myname, mynewname,
	    sizeof (dd->dd_myname));
	dsl_dir_rele(dd->dd_parent, dd);
	dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
	VERIFY0(dsl_dir_hold_obj(dp,
	    newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx));

	/* TODO: A rename callback to avoid these layering violations. */
	zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
	zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
	    ddra->ddra_newname, B_TRUE);

	dsl_prop_notify_all(dd);

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
}

int
dsl_dir_rename(const char *oldname, const char *newname)
{
	dsl_dir_rename_arg_t ddra;

	ddra.ddra_oldname = oldname;
	ddra.ddra_newname = newname;
	ddra.ddra_cred = CRED();
	ddra.ddra_proc = curproc;

	return (dsl_sync_task(oldname,
	    dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
	    3, ZFS_SPACE_CHECK_RESERVED));
}

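/*
 * Determine whether `space` bytes (and fs_cnt/ss_cnt filesystems and
 * snapshots) can move from sdd to tdd. Space freed below the common
 * ancestor nets out against space consumed there, so only the portion
 * of the transfer visible at the ancestor counts against availability.
 */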
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
    uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space,
    cred_t *cr, proc_t *proc)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;
	int err;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (SET_ERROR(ENOSPC));

	err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
	    ancestor, cr, proc);
	if (err != 0)
		return (err);
	err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
	    ancestor, cr, proc);
	if (err != 0)
		return (err);

	return (0);
}

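/*
 * Return the last time this dir's snapshot listing changed (as recorded
 * by dsl_dir_snap_cmtime_update() below); used to report timestamps on
 * the .zfs/snapshot directory.
 */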
inode_timespec_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	inode_timespec_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}

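/*
 * Record the current time as the last snapshot-namespace change for
 * this dir. If the pool supports extensible datasets, also persist the
 * timestamp on disk in the DD_FIELD_SNAPSHOTS_CHANGED ZAP entry,
 * zapifying the dir first if needed.
 */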
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	inode_timespec_t t;
	gethrestime(&t);

	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	if (spa_feature_is_enabled(dp->dp_spa,
	    SPA_FEATURE_EXTENSIBLE_DATASET)) {
		objset_t *mos = dd->dd_pool->dp_meta_objset;
		uint64_t ddobj = dd->dd_object;
		dsl_dir_zapify(dd, tx);
		VERIFY0(zap_update(mos, ddobj,
		    DD_FIELD_SNAPSHOTS_CHANGED,
		    sizeof (uint64_t),
		    sizeof (inode_timespec_t) / sizeof (uint64_t),
		    &t, tx));
	}
	mutex_exit(&dd->dd_lock);
}

void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}

boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
	dmu_object_info_t doi;

	dmu_object_info_from_db(dd->dd_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}

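/*
 * Open this dir's livelist (a deadlist-format log of the blocks born
 * and freed in a clone, used to speed up clone deletion) stored at
 * `obj`, and create the in-memory bplists that buffer this txg's
 * pending allocations and frees.
 */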
void
dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa,
	    SPA_FEATURE_LIVELIST));
	dsl_deadlist_open(&dd->dd_livelist, mos, obj);
	bplist_create(&dd->dd_pending_allocs);
	bplist_create(&dd->dd_pending_frees);
}

void
dsl_dir_livelist_close(dsl_dir_t *dd)
{
	dsl_deadlist_close(&dd->dd_livelist);
	bplist_destroy(&dd->dd_pending_allocs);
	bplist_destroy(&dd->dd_pending_frees);
}

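/*
 * Tear down this dir's livelist: stop any condense of it that is
 * pending or in progress, close the in-memory state, and delete the
 * DD_FIELD_LIVELIST ZAP entry. If `total` is set, also free the
 * on-disk livelist object and drop the feature refcount.
 */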
void
dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total)
{
	uint64_t obj;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	livelist_condense_entry_t to_condense = spa->spa_to_condense;

	if (!dsl_deadlist_is_open(&dd->dd_livelist))
		return;

	/*
	 * If the livelist being removed is set to be condensed, stop the
	 * condense zthr and indicate the cancellation in the
	 * spa_to_condense struct, in case the condense no-wait synctask
	 * has already started.
	 */
	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
	if (ll_condense_thread != NULL &&
	    (to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) {
		/*
		 * We use zthr_wait_cycle_done instead of zthr_cancel
		 * because we don't want to destroy the zthr, just have
		 * it skip its current task.
		 */
		spa->spa_to_condense.cancelled = B_TRUE;
		zthr_wait_cycle_done(ll_condense_thread);
		/*
		 * If we've returned from zthr_wait_cycle_done without
		 * clearing the to_condense data structure, then either
		 * the no-wait synctask has started (indicated by the
		 * 'syncing' field of to_condense), in which case we can
		 * expect it to clear to_condense on its own, or we
		 * returned before the zthr ran. In the latter case the
		 * checkfunc will now fail as cancelled == B_TRUE, so we
		 * can safely NULL out ds, allowing a different dir's
		 * livelist to be condensed.
		 *
		 * We can be sure that the to_condense struct will not
		 * be repopulated at this stage because both this
		 * function and dsl_livelist_try_condense execute in
		 * syncing context.
		 */
		if ((spa->spa_to_condense.ds != NULL) &&
		    !spa->spa_to_condense.syncing) {
			dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf,
			    spa);
			spa->spa_to_condense.ds = NULL;
		}
	}

	dsl_dir_livelist_close(dd);
	VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj));
	VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_LIVELIST, tx));
	if (total) {
		dsl_deadlist_free(dp->dp_meta_objset, obj, tx);
		spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
	}
}

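/*
 * Determine whether `activity` is still in progress for this dir.
 * Called with dd_activity_lock held. Currently the only activity is
 * ZFS_WAIT_DELETEQ: check whether the ZPL delete (unlinked) queue is
 * non-empty, treating unmounted or read-only datasets as having nothing
 * in progress.
 */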
static int
dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds,
    zfs_wait_activity_t activity, boolean_t *in_progress)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&dd->dd_activity_lock));

	switch (activity) {
	case ZFS_WAIT_DELETEQ: {
#ifdef _KERNEL
		objset_t *os;
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0)
			break;

		mutex_enter(&os->os_user_ptr_lock);
		void *user = dmu_objset_get_user(os);
		mutex_exit(&os->os_user_ptr_lock);
		if (dmu_objset_type(os) != DMU_OST_ZFS ||
		    user == NULL || zfs_get_vfs_flag_unmounted(os)) {
			*in_progress = B_FALSE;
			return (0);
		}

		uint64_t readonly = B_FALSE;
		error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly,
		    NULL);

		if (error != 0)
			break;

		if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) {
			*in_progress = B_FALSE;
			return (0);
		}

		uint64_t count, unlinked_obj;
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
		    &unlinked_obj);
		if (error != 0)
			break;
		error = zap_count(os, unlinked_obj, &count);

		if (error == 0)
			*in_progress = (count != 0);
		break;
#else
		/*
		 * The delete queue is ZPL specific, and libzpool doesn't have
		 * it. It doesn't make sense to wait for it.
		 */
		(void) ds;
		*in_progress = B_FALSE;
		break;
#endif
	}
	default:
		panic("unrecognized value for activity %d", activity);
	}

	return (error);
}

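/*
 * Sleep until `activity` is no longer in progress, the wait is
 * interrupted by a signal, or the waiters are cancelled via
 * dsl_dir_cancel_waiters(). Expects the caller to hold
 * dd_activity_lock (cv_wait_sig() drops and reacquires it) and to
 * maintain dd_activity_waiters. Sets *waited if we actually had to
 * wait.
 */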
int
dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
    boolean_t *waited)
{
	int error = 0;
	boolean_t in_progress;
	dsl_pool_t *dp = dd->dd_pool;
	for (;;) {
		dsl_pool_config_enter(dp, FTAG);
		error = dsl_dir_activity_in_progress(dd, ds, activity,
		    &in_progress);
		dsl_pool_config_exit(dp, FTAG);
		if (error != 0 || !in_progress)
			break;

		*waited = B_TRUE;

		if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
		    0 || dd->dd_activity_cancelled) {
			error = SET_ERROR(EINTR);
			break;
		}
	}
	return (error);
}

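/*
 * Wake all dsl_dir_wait() callers with EINTR and block until the last
 * of them has dropped out, so the dir can be torn down safely.
 */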
void
dsl_dir_cancel_waiters(dsl_dir_t *dd)
{
	mutex_enter(&dd->dd_activity_lock);
	dd->dd_activity_cancelled = B_TRUE;
	cv_broadcast(&dd->dd_activity_cv);
	while (dd->dd_activity_waiters > 0)
		cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
	mutex_exit(&dd->dd_activity_lock);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);
#endif