xref: /freebsd/sys/contrib/openzfs/module/zfs/dsl_dir.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or https://opensource.org/licenses/CDDL-1.0.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
25  * Copyright (c) 2013 Martin Matuska. All rights reserved.
26  * Copyright (c) 2014 Joyent, Inc. All rights reserved.
27  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28  * Copyright (c) 2016 Actifio, Inc. All rights reserved.
29  * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
30  * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
31  */
32 
33 #include <sys/dmu.h>
34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_tx.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_prop.h>
39 #include <sys/dsl_synctask.h>
40 #include <sys/dsl_deleg.h>
41 #include <sys/dmu_impl.h>
42 #include <sys/spa.h>
43 #include <sys/spa_impl.h>
44 #include <sys/metaslab.h>
45 #include <sys/zap.h>
46 #include <sys/zio.h>
47 #include <sys/arc.h>
48 #include <sys/sunddi.h>
49 #include <sys/zfeature.h>
50 #include <sys/policy.h>
51 #include <sys/zfs_vfsops.h>
52 #include <sys/zfs_znode.h>
53 #include <sys/zvol.h>
54 #include <sys/zthr.h>
55 #include "zfs_namecheck.h"
56 #include "zfs_prop.h"
57 
58 /*
59  * This tunable controls whether the ZVOL quota is enforced.
60  * Quotas are not otherwise implemented for ZVOLs: the quota is
61  * effectively the size of the ZVOL, and the volume's size already
62  * implies that limit, while enforcing the generic quota mechanism
63  * can introduce a significant performance drop.
64  */
65 static int zvol_enforce_quotas = B_TRUE;
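/*
 * Added note (not in the original source): zvol_enforce_quotas is
 * assumed to be exported as a ZFS module parameter (the registration
 * lives outside this excerpt).  On Linux it would then be tunable at
 * runtime, e.g.:
 *
 *	echo 0 > /sys/module/zfs/parameters/zvol_enforce_quotas
 */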
66 
67 /*
68  * Filesystem and Snapshot Limits
69  * ------------------------------
70  *
71  * These limits are used to restrict the number of filesystems and/or snapshots
72  * that can be created at a given level in the tree or below. A typical
73  * use-case is with a delegated dataset where the administrator wants to ensure
74  * that a user within the zone is not creating too many additional filesystems
75  * or snapshots, even though they're not exceeding their space quota.
76  *
77  * The filesystem and snapshot counts are stored as extensible properties. This
78  * capability is controlled by a feature flag and must be enabled to be used.
79  * Once enabled, the feature is not active until the first limit is set. At
80  * that point, future operations to create/destroy filesystems or snapshots
81  * will validate and update the counts.
82  *
83  * Because the count properties will not exist before the feature is active,
84  * the counts are updated when a limit is first set on an uninitialized
85  * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
86  * all of the nested filesystems/snapshots. Thus, a new leaf node has a
87  * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
88  * snapshot count properties on a node indicate uninitialized counts on that
89  * node.) When first setting a limit on an uninitialized node, the code starts
90  * at the filesystem with the new limit and descends into all sub-filesystems
91  * to add the count properties.
92  *
93  * In practice this is lightweight since a limit is typically set when the
94  * filesystem is created and thus has no children. Once valid, changing the
95  * limit value won't require a re-traversal since the counts are already valid.
96  * When recursively fixing the counts, if a node with a limit is encountered
97  * during the descent, the counts are known to be valid and there is no need to
98  * descend into that filesystem's children. The counts on filesystems above the
99  * one with the new limit will still be uninitialized, unless a limit is
100  * eventually set on one of those filesystems. The counts are always recursively
101  * updated when a limit is set on a dataset, unless there is already a limit.
102  * When a new limit value is set on a filesystem with an existing limit, it is
103  * possible for the new limit to be less than the current count at that level
104  * since a user who can change the limit is also allowed to exceed the limit.
105  *
106  * Once the feature is active, then whenever a filesystem or snapshot is
107  * created, the code recurses up the tree, validating the new count against the
108  * limit at each initialized level. In practice, most levels will not have a
109  * limit set. If there is a limit at any initialized level up the tree, the
110  * check must pass or the creation will fail. Likewise, when a filesystem or
111  * snapshot is destroyed, the counts are recursively adjusted all the way up
112  * the initialized nodes in the tree. Renaming a filesystem into a different point
113  * in the tree will first validate, then update the counts on each branch up to
114  * the common ancestor. A receive will also validate the counts and then update
115  * them.
116  *
117  * An exception to the above behavior is that the limit is not enforced if the
118  * user has permission to modify the limit. This is primarily so that
119  * recursive snapshots in the global zone always work. We want to prevent a
120  * denial-of-service in which a lower level delegated dataset could max out its
121  * limit and thus block recursive snapshots from being taken in the global zone.
122  * Because of this, it is possible for the snapshot count to be over the limit
123  * and snapshots taken in the global zone could cause a lower level dataset to
124  * hit or exceed its limit. The administrator taking the global zone recursive
125  * snapshot should be aware of this side-effect and behave accordingly.
126  * For consistency, the filesystem limit is also not enforced if the user can
127  * modify the limit.
128  *
129  * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
130  * and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
131  * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
132  * dsl_dir_init_fs_ss_count().
133  */
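/*
 * Added example (not in the original source): a sketch of how these
 * limits are typically exercised from userland.  Setting the property
 * reaches dsl_dir_activate_fs_ss_limit() through the ioctl path, and
 * later create/snapshot operations are validated by
 * dsl_fs_ss_limit_check():
 *
 *	# zfs set filesystem_limit=50 tank/delegated
 *	# zfs set snapshot_limit=100 tank/delegated
 *	# zfs create tank/delegated/fs1	    (checked against the limits)
 *
 * A create or snapshot that would push an initialized count past its
 * limit fails with EDQUOT.
 */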
134 
135 static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
136 
137 typedef struct ddulrt_arg {
138 	dsl_dir_t	*ddulrta_dd;
139 	uint64_t	ddlrta_txg;
140 } ddulrt_arg_t;
141 
142 static void
143 dsl_dir_evict_async(void *dbu)
144 {
145 	dsl_dir_t *dd = dbu;
146 	int t;
147 	dsl_pool_t *dp __maybe_unused = dd->dd_pool;
148 
149 	dd->dd_dbuf = NULL;
150 
151 	for (t = 0; t < TXG_SIZE; t++) {
152 		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
153 		ASSERT(dd->dd_tempreserved[t] == 0);
154 		ASSERT(dd->dd_space_towrite[t] == 0);
155 	}
156 
157 	if (dd->dd_parent)
158 		dsl_dir_async_rele(dd->dd_parent, dd);
159 
160 	spa_async_close(dd->dd_pool->dp_spa, dd);
161 
162 	if (dsl_deadlist_is_open(&dd->dd_livelist))
163 		dsl_dir_livelist_close(dd);
164 
165 	dsl_prop_fini(dd);
166 	cv_destroy(&dd->dd_activity_cv);
167 	mutex_destroy(&dd->dd_activity_lock);
168 	mutex_destroy(&dd->dd_lock);
169 	kmem_free(dd, sizeof (dsl_dir_t));
170 }
171 
172 int
173 dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
174     const char *tail, const void *tag, dsl_dir_t **ddp)
175 {
176 	dmu_buf_t *dbuf;
177 	dsl_dir_t *dd;
178 	dmu_object_info_t doi;
179 	int err;
180 
181 	ASSERT(dsl_pool_config_held(dp));
182 
183 	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
184 	if (err != 0)
185 		return (err);
186 	dd = dmu_buf_get_user(dbuf);
187 
188 	dmu_object_info_from_db(dbuf, &doi);
189 	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
190 	ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
191 
192 	if (dd == NULL) {
193 		dsl_dir_t *winner;
194 
195 		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
196 		dd->dd_object = ddobj;
197 		dd->dd_dbuf = dbuf;
198 		dd->dd_pool = dp;
199 
200 		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
201 		mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
202 		cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
203 		dsl_prop_init(dd);
204 
205 		if (dsl_dir_is_zapified(dd)) {
206 			err = zap_lookup(dp->dp_meta_objset,
207 			    ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
208 			    sizeof (uint64_t), 1, &dd->dd_crypto_obj);
209 			if (err == 0) {
210 				/* check for on-disk format errata */
211 				if (dsl_dir_incompatible_encryption_version(
212 				    dd)) {
213 					dp->dp_spa->spa_errata =
214 					    ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
215 				}
216 			} else if (err != ENOENT) {
217 				goto errout;
218 			}
219 		}
220 
221 		if (dsl_dir_phys(dd)->dd_parent_obj) {
222 			err = dsl_dir_hold_obj(dp,
223 			    dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
224 			    &dd->dd_parent);
225 			if (err != 0)
226 				goto errout;
227 			if (tail) {
228 #ifdef ZFS_DEBUG
229 				uint64_t foundobj;
230 
231 				err = zap_lookup(dp->dp_meta_objset,
232 				    dsl_dir_phys(dd->dd_parent)->
233 				    dd_child_dir_zapobj, tail,
234 				    sizeof (foundobj), 1, &foundobj);
235 				ASSERT(err || foundobj == ddobj);
236 #endif
237 				(void) strlcpy(dd->dd_myname, tail,
238 				    sizeof (dd->dd_myname));
239 			} else {
240 				err = zap_value_search(dp->dp_meta_objset,
241 				    dsl_dir_phys(dd->dd_parent)->
242 				    dd_child_dir_zapobj,
243 				    ddobj, 0, dd->dd_myname,
244 				    sizeof (dd->dd_myname));
245 			}
246 			if (err != 0)
247 				goto errout;
248 		} else {
249 			(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
250 			    sizeof (dd->dd_myname));
251 		}
252 
253 		if (dsl_dir_is_clone(dd)) {
254 			dmu_buf_t *origin_bonus;
255 			dsl_dataset_phys_t *origin_phys;
256 
257 			/*
258 			 * We can't open the origin dataset, because
259 			 * that would require opening this dsl_dir.
260 			 * Just look at its phys directly instead.
261 			 */
262 			err = dmu_bonus_hold(dp->dp_meta_objset,
263 			    dsl_dir_phys(dd)->dd_origin_obj, FTAG,
264 			    &origin_bonus);
265 			if (err != 0)
266 				goto errout;
267 			origin_phys = origin_bonus->db_data;
268 			dd->dd_origin_txg =
269 			    origin_phys->ds_creation_txg;
270 			dmu_buf_rele(origin_bonus, FTAG);
271 			if (dsl_dir_is_zapified(dd)) {
272 				uint64_t obj;
273 				err = zap_lookup(dp->dp_meta_objset,
274 				    dd->dd_object, DD_FIELD_LIVELIST,
275 				    sizeof (uint64_t), 1, &obj);
276 				if (err == 0) {
277 					err = dsl_dir_livelist_open(dd, obj);
278 					if (err != 0)
279 						goto errout;
280 				} else if (err != ENOENT)
281 					goto errout;
282 			}
283 		}
284 
285 		if (dsl_dir_is_zapified(dd)) {
286 			inode_timespec_t t = {0};
287 			(void) zap_lookup(dp->dp_meta_objset, ddobj,
288 			    DD_FIELD_SNAPSHOTS_CHANGED,
289 			    sizeof (uint64_t),
290 			    sizeof (inode_timespec_t) / sizeof (uint64_t),
291 			    &t);
292 			dd->dd_snap_cmtime = t;
293 		}
294 
295 		dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
296 		    &dd->dd_dbuf);
297 		winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
298 		if (winner != NULL) {
299 			if (dd->dd_parent)
300 				dsl_dir_rele(dd->dd_parent, dd);
301 			if (dsl_deadlist_is_open(&dd->dd_livelist))
302 				dsl_dir_livelist_close(dd);
303 			dsl_prop_fini(dd);
304 			cv_destroy(&dd->dd_activity_cv);
305 			mutex_destroy(&dd->dd_activity_lock);
306 			mutex_destroy(&dd->dd_lock);
307 			kmem_free(dd, sizeof (dsl_dir_t));
308 			dd = winner;
309 		} else {
310 			spa_open_ref(dp->dp_spa, dd);
311 		}
312 	}
313 
314 	/*
315 	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
316 	 * holds on the spa.  We need the open-to-close holds because
317 	 * otherwise the spa_refcnt wouldn't change when we open a
318 	 * dir which the spa also has open, so we could incorrectly
319 	 * think it was OK to unload/export/destroy the pool.  We need
320 	 * the instantiate-to-evict hold because the dsl_dir_t has a
321 	 * pointer to the dd_pool, which has a pointer to the spa_t.
322 	 */
323 	spa_open_ref(dp->dp_spa, tag);
324 	ASSERT3P(dd->dd_pool, ==, dp);
325 	ASSERT3U(dd->dd_object, ==, ddobj);
326 	ASSERT3P(dd->dd_dbuf, ==, dbuf);
327 	*ddp = dd;
328 	return (0);
329 
330 errout:
331 	if (dd->dd_parent)
332 		dsl_dir_rele(dd->dd_parent, dd);
333 	if (dsl_deadlist_is_open(&dd->dd_livelist))
334 		dsl_dir_livelist_close(dd);
335 	dsl_prop_fini(dd);
336 	cv_destroy(&dd->dd_activity_cv);
337 	mutex_destroy(&dd->dd_activity_lock);
338 	mutex_destroy(&dd->dd_lock);
339 	kmem_free(dd, sizeof (dsl_dir_t));
340 	dmu_buf_rele(dbuf, tag);
341 	return (err);
342 }
343 
344 void
345 dsl_dir_rele(dsl_dir_t *dd, const void *tag)
346 {
347 	dprintf_dd(dd, "%s\n", "");
348 	spa_close(dd->dd_pool->dp_spa, tag);
349 	dmu_buf_rele(dd->dd_dbuf, tag);
350 }
351 
352 /*
353  * Remove a reference to the given dsl dir that is being asynchronously
354  * released.  Async releases occur from a taskq performing eviction of
355  * dsl datasets and dirs.  This process is identical to a normal release
356  * with the exception of using the async API for releasing the reference on
357  * the spa.
358  */
359 void
360 dsl_dir_async_rele(dsl_dir_t *dd, const void *tag)
361 {
362 	dprintf_dd(dd, "%s\n", "");
363 	spa_async_close(dd->dd_pool->dp_spa, tag);
364 	dmu_buf_rele(dd->dd_dbuf, tag);
365 }
366 
367 /* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
368 void
369 dsl_dir_name(dsl_dir_t *dd, char *buf)
370 {
371 	if (dd->dd_parent) {
372 		dsl_dir_name(dd->dd_parent, buf);
373 		VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
374 		    ZFS_MAX_DATASET_NAME_LEN);
375 	} else {
376 		buf[0] = '\0';
377 	}
378 	if (!MUTEX_HELD(&dd->dd_lock)) {
379 		/*
380 		 * recursive mutex so that we can use
381 		 * dprintf_dd() with dd_lock held
382 		 */
383 		mutex_enter(&dd->dd_lock);
384 		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
385 		    <, ZFS_MAX_DATASET_NAME_LEN);
386 		mutex_exit(&dd->dd_lock);
387 	} else {
388 		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
389 		    <, ZFS_MAX_DATASET_NAME_LEN);
390 	}
391 }
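/*
 * Added caller sketch (illustrative only): per the contract above, the
 * destination buffer must hold at least ZFS_MAX_DATASET_NAME_LEN bytes:
 *
 *	char name[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	dsl_dir_name(dd, name);
 *	dprintf("dir name: %s\n", name);
 */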
392 
393 /* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
394 int
395 dsl_dir_namelen(dsl_dir_t *dd)
396 {
397 	int result = 0;
398 
399 	if (dd->dd_parent) {
400 		/* parent's name + 1 for the "/" */
401 		result = dsl_dir_namelen(dd->dd_parent) + 1;
402 	}
403 
404 	if (!MUTEX_HELD(&dd->dd_lock)) {
405 		/* see dsl_dir_name */
406 		mutex_enter(&dd->dd_lock);
407 		result += strlen(dd->dd_myname);
408 		mutex_exit(&dd->dd_lock);
409 	} else {
410 		result += strlen(dd->dd_myname);
411 	}
412 
413 	return (result);
414 }
415 
416 static int
417 getcomponent(const char *path, char *component, const char **nextp)
418 {
419 	char *p;
420 
421 	if ((path == NULL) || (path[0] == '\0'))
422 		return (SET_ERROR(ENOENT));
423 	/* This would be a good place to reserve some namespace... */
424 	p = strpbrk(path, "/@");
425 	if (p && (p[1] == '/' || p[1] == '@')) {
426 		/* two separators in a row */
427 		return (SET_ERROR(EINVAL));
428 	}
429 	if (p == NULL || p == path) {
430 		/*
431 		 * If the first thing is an '@' or '/', it must be an '@',
432 		 * the rest of the path must contain no more '@'s or '/'s,
433 		 * and there must be something after the '@'.
434 		 */
435 		if (p != NULL &&
436 		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
437 			return (SET_ERROR(EINVAL));
438 		if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
439 			return (SET_ERROR(ENAMETOOLONG));
440 		(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
441 		p = NULL;
442 	} else if (p[0] == '/') {
443 		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
444 			return (SET_ERROR(ENAMETOOLONG));
445 		(void) strlcpy(component, path, p - path + 1);
446 		p++;
447 	} else if (p[0] == '@') {
448 		/*
449 		 * If the next separator is an '@', there must not be
450 		 * any more slashes.
451 		 */
452 		if (strchr(path, '/'))
453 			return (SET_ERROR(EINVAL));
454 		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
455 			return (SET_ERROR(ENAMETOOLONG));
456 		(void) strlcpy(component, path, p - path + 1);
457 	} else {
458 		panic("invalid p=%p", (void *)p);
459 	}
460 	*nextp = p;
461 	return (0);
462 }
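/*
 * Added worked example (not in the original source): for the path
 * "pool/fs@snap", successive getcomponent() calls yield:
 *
 *	getcomponent("pool/fs@snap", buf, &next)  buf="pool", next="fs@snap"
 *	getcomponent("fs@snap", buf, &next)       buf="fs",   next="@snap"
 *
 * Doubled separators ("pool//fs") fail with EINVAL, and a component of
 * ZFS_MAX_DATASET_NAME_LEN or more characters fails with ENAMETOOLONG.
 */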
463 
464 /*
465  * Return the dsl_dir_t in *ddp and, in *tailp, the last component which
466  * couldn't be found.  The name must be in the specified dsl_pool_t.  This
467  * thread must hold the dp_config_rwlock for the pool.  Returns ENOENT if
468  * the path is bogus, or if tailp==NULL and we couldn't parse the whole name.
469  * (*tailp)[0] == '@' means that the last component is a snapshot.
470  */
471 int
472 dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
473     dsl_dir_t **ddp, const char **tailp)
474 {
475 	char *buf;
476 	const char *spaname, *next, *nextnext = NULL;
477 	int err;
478 	dsl_dir_t *dd;
479 	uint64_t ddobj;
480 
481 	buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
482 	err = getcomponent(name, buf, &next);
483 	if (err != 0)
484 		goto error;
485 
486 	/* Make sure the name is in the specified pool. */
487 	spaname = spa_name(dp->dp_spa);
488 	if (strcmp(buf, spaname) != 0) {
489 		err = SET_ERROR(EXDEV);
490 		goto error;
491 	}
492 
493 	ASSERT(dsl_pool_config_held(dp));
494 
495 	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
496 	if (err != 0) {
497 		goto error;
498 	}
499 
500 	while (next != NULL) {
501 		dsl_dir_t *child_dd;
502 		err = getcomponent(next, buf, &nextnext);
503 		if (err != 0)
504 			break;
505 		ASSERT(next[0] != '\0');
506 		if (next[0] == '@')
507 			break;
508 		dprintf("looking up %s in obj%lld\n",
509 		    buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);
510 
511 		err = zap_lookup(dp->dp_meta_objset,
512 		    dsl_dir_phys(dd)->dd_child_dir_zapobj,
513 		    buf, sizeof (ddobj), 1, &ddobj);
514 		if (err != 0) {
515 			if (err == ENOENT)
516 				err = 0;
517 			break;
518 		}
519 
520 		err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
521 		if (err != 0)
522 			break;
523 		dsl_dir_rele(dd, tag);
524 		dd = child_dd;
525 		next = nextnext;
526 	}
527 
528 	if (err != 0) {
529 		dsl_dir_rele(dd, tag);
530 		goto error;
531 	}
532 
533 	/*
534 	 * It's an error if there's more than one component left, or
535 	 * tailp==NULL and there's any component left.
536 	 */
537 	if (next != NULL &&
538 	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
539 		/* bad path name */
540 		dsl_dir_rele(dd, tag);
541 		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
542 		err = SET_ERROR(ENOENT);
543 	}
544 	if (tailp != NULL)
545 		*tailp = next;
546 	if (err == 0)
547 		*ddp = dd;
548 error:
549 	kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
550 	return (err);
551 }
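/*
 * Added usage sketch (illustrative only): callers must hold the pool
 * config lock, and release the dir with the same tag:
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *	int error;
 *
 *	ASSERT(dsl_pool_config_held(dp));
 *	error = dsl_dir_hold(dp, "pool/fs/child", FTAG, &dd, &tail);
 *	if (error == 0) {
 *		(...)
 *		dsl_dir_rele(dd, FTAG);
 *	}
 *
 * If "child" does not exist, the hold is returned on "pool/fs" and
 * *tail points at "child".
 */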
552 
553 /*
554  * If the counts are already initialized for this filesystem and its
555  * descendants then do nothing, otherwise initialize the counts.
556  *
557  * The counts on this filesystem, and those below, may be uninitialized due to
558  * either the use of a pre-existing pool which did not support the
559  * filesystem/snapshot limit feature, or one in which the feature had not yet
560  * been enabled.
561  *
562  * Recursively descend the filesystem tree and update the filesystem/snapshot
563  * counts on each filesystem below, then update the cumulative count on the
564  * current filesystem. If the filesystem already has a count set on it,
565  * then we know that its counts, and the counts on the filesystems below it,
566  * are already correct, so we don't have to update this filesystem.
567  */
568 static void
569 dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
570 {
571 	uint64_t my_fs_cnt = 0;
572 	uint64_t my_ss_cnt = 0;
573 	dsl_pool_t *dp = dd->dd_pool;
574 	objset_t *os = dp->dp_meta_objset;
575 	zap_cursor_t *zc;
576 	zap_attribute_t *za;
577 	dsl_dataset_t *ds;
578 
579 	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
580 	ASSERT(dsl_pool_config_held(dp));
581 	ASSERT(dmu_tx_is_syncing(tx));
582 
583 	dsl_dir_zapify(dd, tx);
584 
585 	/*
586 	 * If the filesystem count has already been initialized then we
587 	 * don't need to recurse down any further.
588 	 */
589 	if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
590 		return;
591 
592 	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
593 	za = zap_attribute_alloc();
594 
595 	/* Iterate my child dirs */
596 	for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
597 	    zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
598 		dsl_dir_t *chld_dd;
599 		uint64_t count;
600 
601 		VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
602 		    &chld_dd));
603 
604 		/*
605 		 * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
606 		 */
607 		if (chld_dd->dd_myname[0] == '$') {
608 			dsl_dir_rele(chld_dd, FTAG);
609 			continue;
610 		}
611 
612 		my_fs_cnt++;	/* count this child */
613 
614 		dsl_dir_init_fs_ss_count(chld_dd, tx);
615 
616 		VERIFY0(zap_lookup(os, chld_dd->dd_object,
617 		    DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
618 		my_fs_cnt += count;
619 		VERIFY0(zap_lookup(os, chld_dd->dd_object,
620 		    DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
621 		my_ss_cnt += count;
622 
623 		dsl_dir_rele(chld_dd, FTAG);
624 	}
625 	zap_cursor_fini(zc);
626 	/* Count my snapshots (we counted children's snapshots above) */
627 	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
628 	    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
629 
630 	for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
631 	    zap_cursor_retrieve(zc, za) == 0;
632 	    zap_cursor_advance(zc)) {
633 		/* Don't count temporary snapshots */
634 		if (za->za_name[0] != '%')
635 			my_ss_cnt++;
636 	}
637 	zap_cursor_fini(zc);
638 
639 	dsl_dataset_rele(ds, FTAG);
640 
641 	kmem_free(zc, sizeof (zap_cursor_t));
642 	zap_attribute_free(za);
643 
644 	/* we're in a sync task, update counts */
645 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
646 	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
647 	    sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
648 	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
649 	    sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
650 }
651 
652 static int
653 dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
654 {
655 	char *ddname = (char *)arg;
656 	dsl_pool_t *dp = dmu_tx_pool(tx);
657 	dsl_dataset_t *ds;
658 	dsl_dir_t *dd;
659 	int error;
660 
661 	error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
662 	if (error != 0)
663 		return (error);
664 
665 	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
666 		dsl_dataset_rele(ds, FTAG);
667 		return (SET_ERROR(ENOTSUP));
668 	}
669 
670 	dd = ds->ds_dir;
671 	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
672 	    dsl_dir_is_zapified(dd) &&
673 	    zap_contains(dp->dp_meta_objset, dd->dd_object,
674 	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
675 		dsl_dataset_rele(ds, FTAG);
676 		return (SET_ERROR(EALREADY));
677 	}
678 
679 	dsl_dataset_rele(ds, FTAG);
680 	return (0);
681 }
682 
683 static void
684 dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
685 {
686 	char *ddname = (char *)arg;
687 	dsl_pool_t *dp = dmu_tx_pool(tx);
688 	dsl_dataset_t *ds;
689 	spa_t *spa;
690 
691 	VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
692 
693 	spa = dsl_dataset_get_spa(ds);
694 
695 	if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
696 		/*
697 		 * Since the feature was not active and we're now setting a
698 		 * limit, increment the feature-active counter so that the
699 		 * feature becomes active for the first time.
700 		 *
701 		 * We are already in a sync task so we can update the MOS.
702 		 */
703 		spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
704 	}
705 
706 	/*
707 	 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
708 	 * we need to ensure the counts are correct. Descend down the tree from
709 	 * this point and update all of the counts to be accurate.
710 	 */
711 	dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
712 
713 	dsl_dataset_rele(ds, FTAG);
714 }
715 
716 /*
717  * Make sure the feature is enabled and activate it if necessary.
718  * Since we're setting a limit, ensure the on-disk counts are valid.
719  * This is only called by the ioctl path when setting a limit value.
720  *
721  * We do not need to validate the new limit, since users who can change the
722  * limit are also allowed to exceed the limit.
723  */
724 int
725 dsl_dir_activate_fs_ss_limit(const char *ddname)
726 {
727 	int error;
728 
729 	error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
730 	    dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
731 	    ZFS_SPACE_CHECK_RESERVED);
732 
733 	if (error == EALREADY)
734 		error = 0;
735 
736 	return (error);
737 }
738 
739 /*
740  * Used to determine if the filesystem_limit or snapshot_limit should be
741  * enforced. We allow the limit to be exceeded if the user has permission to
742  * write the property value. We pass in the creds that we got in the open
743  * context since we will always be the GZ root in syncing context. We also have
744  * to handle the case where we are allowed to change the limit on the current
745  * dataset, but there may be another limit in the tree above.
746  *
747  * We can never modify these two properties within a non-global zone. In
748  * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
749  * can't use that function since we are already holding the dp_config_rwlock.
750  * In addition, we already have the dd, and dealing with snapshots is simplified
751  * in this code.
752  */
753 
754 typedef enum {
755 	ENFORCE_ALWAYS,
756 	ENFORCE_NEVER,
757 	ENFORCE_ABOVE
758 } enforce_res_t;
759 
760 static enforce_res_t
761 dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
762     cred_t *cr, proc_t *proc)
763 {
764 	enforce_res_t enforce = ENFORCE_ALWAYS;
765 	uint64_t obj;
766 	dsl_dataset_t *ds;
767 	uint64_t zoned;
768 	const char *zonedstr;
769 
770 	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
771 	    prop == ZFS_PROP_SNAPSHOT_LIMIT);
772 
773 #ifdef _KERNEL
774 	if (crgetzoneid(cr) != GLOBAL_ZONEID)
775 		return (ENFORCE_ALWAYS);
776 
777 	/*
778 	 * We are checking the saved credentials of the user process, which is
779 	 * not the current process.  Note that we can't use secpolicy_zfs(),
780 	 * because it only works if the cred is that of the current process (on
781 	 * Linux).
782 	 */
783 	if (secpolicy_zfs_proc(cr, proc) == 0)
784 		return (ENFORCE_NEVER);
785 #else
786 	(void) proc;
787 #endif
788 
789 	if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
790 		return (ENFORCE_ALWAYS);
791 
792 	ASSERT(dsl_pool_config_held(dd->dd_pool));
793 
794 	if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
795 		return (ENFORCE_ALWAYS);
796 
797 	zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
798 	if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
799 		/* Only root can access zoned fs's from the GZ */
800 		enforce = ENFORCE_ALWAYS;
801 	} else {
802 		if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
803 			enforce = ENFORCE_ABOVE;
804 	}
805 
806 	dsl_dataset_rele(ds, FTAG);
807 	return (enforce);
808 }
809 
810 /*
811  * Check if adding additional child filesystem(s) would exceed any filesystem
812  * limits or adding additional snapshot(s) would exceed any snapshot limits.
813  * The prop argument indicates which limit to check.
814  *
815  * Note that all filesystem limits up to the root filesystem (or up to
816  * the highest initialized filesystem, or the given ancestor) must be satisfied.
817  */
818 int
819 dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
820     dsl_dir_t *ancestor, cred_t *cr, proc_t *proc)
821 {
822 	objset_t *os = dd->dd_pool->dp_meta_objset;
823 	uint64_t limit, count;
824 	const char *count_prop;
825 	enforce_res_t enforce;
826 	int err = 0;
827 
828 	ASSERT(dsl_pool_config_held(dd->dd_pool));
829 	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
830 	    prop == ZFS_PROP_SNAPSHOT_LIMIT);
831 
832 	if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
833 		/*
834 		 * We don't enforce the limit for temporary snapshots. This is
835 		 * indicated by a NULL cred_t argument.
836 		 */
837 		if (cr == NULL)
838 			return (0);
839 
840 		count_prop = DD_FIELD_SNAPSHOT_COUNT;
841 	} else {
842 		count_prop = DD_FIELD_FILESYSTEM_COUNT;
843 	}
844 	/*
845 	 * If we're allowed to change the limit, don't enforce the limit;
846 	 * e.g. this can happen if a snapshot is taken by an administrative
847 	 * user in the global zone (i.e. a recursive snapshot by root).
848 	 * However, we must handle the case of delegated permissions where we
849 	 * are allowed to change the limit on the current dataset, but there
850 	 * is another limit in the tree above.
851 	 */
852 	enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc);
853 	if (enforce == ENFORCE_NEVER)
854 		return (0);
855 
856 	/*
857 	 * e.g. if renaming a dataset with no snapshots, count adjustment
858 	 * is 0.
859 	 */
860 	if (delta == 0)
861 		return (0);
862 
863 	/*
864 	 * If an ancestor has been provided, stop checking the limit once we
865 	 * hit that dir. We need this during rename so that we don't overcount
866 	 * the check once we recurse up to the common ancestor.
867 	 */
868 	if (ancestor == dd)
869 		return (0);
870 
871 	/*
872 	 * If we hit an uninitialized node while recursing up the tree, we can
873 	 * stop since we know there is no limit here (or above). The counts are
874 	 * not valid on this node and we know we won't touch this node's counts.
875 	 */
876 	if (!dsl_dir_is_zapified(dd))
877 		return (0);
878 	err = zap_lookup(os, dd->dd_object,
879 	    count_prop, sizeof (count), 1, &count);
880 	if (err == ENOENT)
881 		return (0);
882 	if (err != 0)
883 		return (err);
884 
885 	err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
886 	    B_FALSE);
887 	if (err != 0)
888 		return (err);
889 
890 	/* Is there a limit which we've hit? */
891 	if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
892 		return (SET_ERROR(EDQUOT));
893 
894 	if (dd->dd_parent != NULL)
895 		err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
896 		    ancestor, cr, proc);
897 
898 	return (err);
899 }
900 
901 /*
902  * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
903  * parents. When a new filesystem/snapshot is created, increment the count on
904  * all parents, and when a filesystem/snapshot is destroyed, decrement the
905  * count.
906  */
907 void
908 dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
909     dmu_tx_t *tx)
910 {
911 	int err;
912 	objset_t *os = dd->dd_pool->dp_meta_objset;
913 	uint64_t count;
914 
915 	ASSERT(dsl_pool_config_held(dd->dd_pool));
916 	ASSERT(dmu_tx_is_syncing(tx));
917 	ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
918 	    strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
919 
920 	/*
921 	 * We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
922 	 */
923 	if (dd->dd_myname[0] == '$' && strcmp(prop,
924 	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
925 		return;
926 	}
927 
928 	/*
929 	 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
930 	 */
931 	if (delta == 0)
932 		return;
933 
934 	/*
935 	 * If we hit an uninitialized node while recursing up the tree, we can
936 	 * stop since we know the counts are not valid on this node and we
937 	 * know we shouldn't touch this node's counts. An uninitialized count
938 	 * on the node indicates that either the feature has not yet been
939 	 * activated or there are no limits on this part of the tree.
940 	 */
941 	if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
942 	    prop, sizeof (count), 1, &count)) == ENOENT)
943 		return;
944 	VERIFY0(err);
945 
946 	count += delta;
947 	/* Use a signed verify to make sure we didn't go negative. */
948 	VERIFY3S(count, >=, 0);
949 
950 	VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
951 	    tx));
952 
953 	/* Roll up this additional count into our ancestors */
954 	if (dd->dd_parent != NULL)
955 		dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
956 }
957 
958 uint64_t
959 dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
960     dmu_tx_t *tx)
961 {
962 	objset_t *mos = dp->dp_meta_objset;
963 	uint64_t ddobj;
964 	dsl_dir_phys_t *ddphys;
965 	dmu_buf_t *dbuf;
966 
967 	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
968 	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
969 	if (pds) {
970 		VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
971 		    name, sizeof (uint64_t), 1, &ddobj, tx));
972 	} else {
973 		/* it's the root dir */
974 		VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
975 		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
976 	}
977 	VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
978 	dmu_buf_will_dirty(dbuf, tx);
979 	ddphys = dbuf->db_data;
980 
981 	ddphys->dd_creation_time = gethrestime_sec();
982 	if (pds) {
983 		ddphys->dd_parent_obj = pds->dd_object;
984 
985 		/* update the filesystem counts */
986 		dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
987 	}
988 	ddphys->dd_props_zapobj = zap_create(mos,
989 	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
990 	ddphys->dd_child_dir_zapobj = zap_create(mos,
991 	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
992 	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
993 		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
994 
995 	dmu_buf_rele(dbuf, FTAG);
996 
997 	return (ddobj);
998 }
999 
1000 boolean_t
1001 dsl_dir_is_clone(dsl_dir_t *dd)
1002 {
1003 	return (dsl_dir_phys(dd)->dd_origin_obj &&
1004 	    (dd->dd_pool->dp_origin_snap == NULL ||
1005 	    dsl_dir_phys(dd)->dd_origin_obj !=
1006 	    dd->dd_pool->dp_origin_snap->ds_object));
1007 }
1008 
1009 uint64_t
1010 dsl_dir_get_used(dsl_dir_t *dd)
1011 {
1012 	return (dsl_dir_phys(dd)->dd_used_bytes);
1013 }
1014 
1015 uint64_t
1016 dsl_dir_get_compressed(dsl_dir_t *dd)
1017 {
1018 	return (dsl_dir_phys(dd)->dd_compressed_bytes);
1019 }
1020 
1021 uint64_t
1022 dsl_dir_get_quota(dsl_dir_t *dd)
1023 {
1024 	return (dsl_dir_phys(dd)->dd_quota);
1025 }
1026 
1027 uint64_t
1028 dsl_dir_get_reservation(dsl_dir_t *dd)
1029 {
1030 	return (dsl_dir_phys(dd)->dd_reserved);
1031 }
1032 
1033 uint64_t
1034 dsl_dir_get_compressratio(dsl_dir_t *dd)
1035 {
1036 	/* a fixed point number, 100x the ratio */
1037 	return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
1038 	    (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
1039 	    dsl_dir_phys(dd)->dd_compressed_bytes));
1040 }
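/*
 * Added worked example: with dd_uncompressed_bytes == 300 and
 * dd_compressed_bytes == 100 this returns 300 (a 3.00x ratio); when
 * dd_compressed_bytes == 0 it reports 100 (1.00x).
 */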
1041 
1042 uint64_t
1043 dsl_dir_get_logicalused(dsl_dir_t *dd)
1044 {
1045 	return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
1046 }
1047 
1048 uint64_t
1049 dsl_dir_get_usedsnap(dsl_dir_t *dd)
1050 {
1051 	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
1052 }
1053 
1054 uint64_t
1055 dsl_dir_get_usedds(dsl_dir_t *dd)
1056 {
1057 	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
1058 }
1059 
1060 uint64_t
1061 dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
1062 {
1063 	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
1064 }
1065 
1066 uint64_t
1067 dsl_dir_get_usedchild(dsl_dir_t *dd)
1068 {
1069 	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
1070 	    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
1071 }
1072 
1073 void
1074 dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
1075 {
1076 	dsl_dataset_t *ds;
1077 	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
1078 	    dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
1079 
1080 	dsl_dataset_name(ds, buf);
1081 
1082 	dsl_dataset_rele(ds, FTAG);
1083 }
1084 
1085 int
1086 dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
1087 {
1088 	if (dsl_dir_is_zapified(dd)) {
1089 		objset_t *os = dd->dd_pool->dp_meta_objset;
1090 		return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
1091 		    sizeof (*count), 1, count));
1092 	} else {
1093 		return (SET_ERROR(ENOENT));
1094 	}
1095 }
1096 
1097 int
1098 dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
1099 {
1100 	if (dsl_dir_is_zapified(dd)) {
1101 		objset_t *os = dd->dd_pool->dp_meta_objset;
1102 		return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
1103 		    sizeof (*count), 1, count));
1104 	} else {
1105 		return (SET_ERROR(ENOENT));
1106 	}
1107 }
1108 
1109 void
1110 dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
1111 {
1112 	mutex_enter(&dd->dd_lock);
1113 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
1114 	    dsl_dir_get_quota(dd));
1115 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
1116 	    dsl_dir_get_reservation(dd));
1117 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
1118 	    dsl_dir_get_logicalused(dd));
1119 	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1120 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
1121 		    dsl_dir_get_usedsnap(dd));
1122 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
1123 		    dsl_dir_get_usedds(dd));
1124 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
1125 		    dsl_dir_get_usedrefreserv(dd));
1126 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
1127 		    dsl_dir_get_usedchild(dd));
1128 	}
1129 	mutex_exit(&dd->dd_lock);
1130 
1131 	uint64_t count;
1132 	if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
1133 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
1134 		    count);
1135 	}
1136 	if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
1137 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
1138 		    count);
1139 	}
1140 
1141 	if (dsl_dir_is_clone(dd)) {
1142 		char buf[ZFS_MAX_DATASET_NAME_LEN];
1143 		dsl_dir_get_origin(dd, buf);
1144 		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
1145 	}
1146 
1147 }
1148 
1149 void
1150 dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
1151 {
1152 	dsl_pool_t *dp = dd->dd_pool;
1153 
1154 	ASSERT(dsl_dir_phys(dd));
1155 
1156 	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
1157 		/* up the hold count until we can be written out */
1158 		dmu_buf_add_ref(dd->dd_dbuf, dd);
1159 	}
1160 }
1161 
1162 static int64_t
1163 parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
1164 {
1165 	uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
1166 	uint64_t new_accounted =
1167 	    MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
1168 	return (new_accounted - old_accounted);
1169 }
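/*
 * Added worked example: parent_delta() computes how much the space
 * accounted by our parent changes, given that the parent always
 * accounts at least dd_reserved on our behalf.  With dd_reserved ==
 * 100, used == 80 and delta == 30:
 *
 *	old_accounted = MAX(80, 100)       = 100
 *	new_accounted = MAX(80 + 30, 100)  = 110
 *
 * so the parent's accounting grows by only 10; the first 20 bytes of
 * the delta were already covered by the reservation.
 */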
1170 
1171 void
1172 dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
1173 {
1174 	ASSERT(dmu_tx_is_syncing(tx));
1175 
1176 	mutex_enter(&dd->dd_lock);
1177 	ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
1178 	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
1179 	    (u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
1180 	dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
1181 	mutex_exit(&dd->dd_lock);
1182 
1183 	/* release the hold from dsl_dir_dirty */
1184 	dmu_buf_rele(dd->dd_dbuf, dd);
1185 }
1186 
1187 static uint64_t
1188 dsl_dir_space_towrite(dsl_dir_t *dd)
1189 {
1190 	uint64_t space = 0;
1191 
1192 	ASSERT(MUTEX_HELD(&dd->dd_lock));
1193 
1194 	for (int i = 0; i < TXG_SIZE; i++)
1195 		space += dd->dd_space_towrite[i & TXG_MASK];
1196 
1197 	return (space);
1198 }
1199 
1200 /*
1201  * How much space would dd have available if ancestor had delta applied
1202  * to it?  If ondiskonly is set, we're only interested in what's
1203  * on-disk, not estimated pending changes.
1204  */
1205 uint64_t
1206 dsl_dir_space_available(dsl_dir_t *dd,
1207     dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
1208 {
1209 	uint64_t parentspace, myspace, quota, used;
1210 
1211 	/*
1212 	 * Unless otherwise restricted, assume we have unlimited
1213 	 * space available.
1214 	 */
1215 	quota = UINT64_MAX;
1216 	parentspace = UINT64_MAX;
1217 
1218 	if (dd->dd_parent != NULL) {
1219 		parentspace = dsl_dir_space_available(dd->dd_parent,
1220 		    ancestor, delta, ondiskonly);
1221 	}
1222 
1223 	mutex_enter(&dd->dd_lock);
1224 	if (dsl_dir_phys(dd)->dd_quota != 0)
1225 		quota = dsl_dir_phys(dd)->dd_quota;
1226 	used = dsl_dir_phys(dd)->dd_used_bytes;
1227 	if (!ondiskonly)
1228 		used += dsl_dir_space_towrite(dd);
1229 
1230 	if (dd->dd_parent == NULL) {
1231 		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
1232 		    ZFS_SPACE_CHECK_NORMAL);
1233 		quota = MIN(quota, poolsize);
1234 	}
1235 
1236 	if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
1237 		/*
1238 		 * We have some space reserved, in addition to what our
1239 		 * parent gave us.
1240 		 */
1241 		parentspace += dsl_dir_phys(dd)->dd_reserved - used;
1242 	}
1243 
1244 	if (dd == ancestor) {
1245 		ASSERT(delta <= 0);
1246 		ASSERT(used >= -delta);
1247 		used += delta;
1248 		if (parentspace != UINT64_MAX)
1249 			parentspace -= delta;
1250 	}
1251 
1252 	if (used > quota) {
1253 		/* over quota */
1254 		myspace = 0;
1255 	} else {
1256 		/*
1257 		 * the lesser of the space provided by our parent and
1258 		 * the space left in our quota
1259 		 */
1260 		myspace = MIN(parentspace, quota - used);
1261 	}
1262 
1263 	mutex_exit(&dd->dd_lock);
1264 
1265 	return (myspace);
1266 }
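/*
 * Added worked example (assuming dd_reserved <= used): with dd_quota
 * == 1000, used == 400, and a parent offering 10000 bytes, the result
 * is MIN(10000, 1000 - 400) == 600.  With no quota set, the parent's
 * space passes through, capped at the adjusted pool size at the root.
 */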
1267 
1268 struct tempreserve {
1269 	list_node_t tr_node;
1270 	dsl_dir_t *tr_ds;
1271 	uint64_t tr_size;
1272 };
1273 
1274 static int
1275 dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
1276     boolean_t ignorequota, list_t *tr_list,
1277     dmu_tx_t *tx, boolean_t first)
1278 {
1279 	uint64_t txg;
1280 	uint64_t quota;
1281 	struct tempreserve *tr;
1282 	int retval;
1283 	uint64_t ext_quota;
1284 	uint64_t ref_rsrv;
1285 
1286 top_of_function:
1287 	txg = tx->tx_txg;
1288 	retval = EDQUOT;
1289 	ref_rsrv = 0;
1290 
1291 	ASSERT3U(txg, !=, 0);
1292 	ASSERT3S(asize, >, 0);
1293 
1294 	mutex_enter(&dd->dd_lock);
1295 
1296 	/*
1297 	 * Check against the dsl_dir's quota.  We don't add in the delta
1298 	 * when checking for over-quota because they get one free hit.
1299 	 */
1300 	uint64_t est_inflight = dsl_dir_space_towrite(dd);
1301 	for (int i = 0; i < TXG_SIZE; i++)
1302 		est_inflight += dd->dd_tempreserved[i];
1303 	uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
1304 
1305 	/*
1306 	 * On the first iteration, fetch the dataset's used-on-disk and
1307 	 * refreservation values. Also, if checkrefquota is set, test if
1308 	 * allocating this space would exceed the dataset's refquota.
1309 	 */
1310 	if (first && tx->tx_objset) {
1311 		int error;
1312 		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
1313 
1314 		error = dsl_dataset_check_quota(ds, !netfree,
1315 		    asize, est_inflight, &used_on_disk, &ref_rsrv);
1316 		if (error != 0) {
1317 			mutex_exit(&dd->dd_lock);
1318 			DMU_TX_STAT_BUMP(dmu_tx_quota);
1319 			return (error);
1320 		}
1321 	}
1322 
1323 	/*
1324 	 * If this transaction will result in a net free of space,
1325 	 * we want to let it through.
1326 	 */
1327 	if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0 ||
1328 	    (tx->tx_objset && dmu_objset_type(tx->tx_objset) == DMU_OST_ZVOL &&
1329 	    zvol_enforce_quotas == B_FALSE))
1330 		quota = UINT64_MAX;
1331 	else
1332 		quota = dsl_dir_phys(dd)->dd_quota;
1333 
1334 	/*
1335 	 * Adjust the quota against the actual pool size at the root
1336 	 * minus any outstanding deferred frees.
1337 	 * To ensure that it's possible to remove files from a full
1338 	 * pool without inducing transient overcommits, we throttle
1339 	 * netfree transactions against a quota that is slightly larger,
1340 	 * but still within the pool's allocation slop.  In cases where
1341 	 * we're very close to full, this will allow a steady trickle of
1342 	 * removes to get through.
1343 	 */
1344 	if (dd->dd_parent == NULL) {
1345 		uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
1346 		    (netfree) ?
1347 		    ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);
1348 
1349 		if (avail < quota) {
1350 			quota = avail;
1351 			retval = SET_ERROR(ENOSPC);
1352 		}
1353 	}
1354 
1355 	/*
1356 	 * If they are requesting more space, and our current estimate
1357 	 * is over quota, they get to try again unless the actual
1358 	 * on-disk is over quota and there are no pending changes
1359 	 * or deferred frees (which may free up space for us).
1360 	 */
1361 	ext_quota = quota >> 5;
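	/* Added note: quota/32 gives ~3% of slack for in-flight estimates. */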
1362 	if (quota == UINT64_MAX)
1363 		ext_quota = 0;
1364 
1365 	if (used_on_disk >= quota) {
1366 		if (retval == ENOSPC && (used_on_disk - quota) <
1367 		    dsl_pool_deferred_space(dd->dd_pool)) {
1368 			retval = SET_ERROR(ERESTART);
1369 		}
1370 		/* Quota exceeded */
1371 		mutex_exit(&dd->dd_lock);
1372 		DMU_TX_STAT_BUMP(dmu_tx_quota);
1373 		return (retval);
1374 	} else if (used_on_disk + est_inflight >= quota + ext_quota) {
1375 		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
1376 		    "quota=%lluK tr=%lluK\n",
1377 		    (u_longlong_t)used_on_disk>>10,
1378 		    (u_longlong_t)est_inflight>>10,
1379 		    (u_longlong_t)quota>>10, (u_longlong_t)asize>>10);
1380 		mutex_exit(&dd->dd_lock);
1381 		DMU_TX_STAT_BUMP(dmu_tx_quota);
1382 		return (SET_ERROR(ERESTART));
1383 	}
1384 
1385 	/* We need to up our estimated delta before dropping dd_lock */
1386 	dd->dd_tempreserved[txg & TXG_MASK] += asize;
1387 
1388 	uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
1389 	    asize - ref_rsrv);
1390 	mutex_exit(&dd->dd_lock);
1391 
1392 	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
1393 	tr->tr_ds = dd;
1394 	tr->tr_size = asize;
1395 	list_insert_tail(tr_list, tr);
1396 
1397 	/* see if it's OK with our parent */
1398 	if (dd->dd_parent != NULL && parent_rsrv != 0) {
1399 		/*
1400 		 * Recurse on our parent without recursion. Plain recursion
1401 		 * here has been observed to cause large stack usage, even
1402 		 * within the test suite. Largest seen stack was 7632 bytes on linux.
1403 		 */
1404 
1405 		dd = dd->dd_parent;
1406 		asize = parent_rsrv;
1407 		ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
1408 		first = B_FALSE;
1409 		goto top_of_function;
1410 	}
1411 
1412 	return (0);
1413 }
1414 
1415 /*
1416  * Reserve space in this dsl_dir, to be used in this tx's txg.
1417  * After the space has been dirtied (and dsl_dir_willuse_space()
1418  * has been called), the reservation should be canceled, using
1419  * dsl_dir_tempreserve_clear().
1420  */
1421 int
1422 dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
1423     boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
1424 {
1425 	int err;
1426 	list_t *tr_list;
1427 
1428 	if (asize == 0) {
1429 		*tr_cookiep = NULL;
1430 		return (0);
1431 	}
1432 
1433 	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
1434 	list_create(tr_list, sizeof (struct tempreserve),
1435 	    offsetof(struct tempreserve, tr_node));
1436 	ASSERT3S(asize, >, 0);
1437 
1438 	err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
1439 	if (err == 0) {
1440 		struct tempreserve *tr;
1441 
1442 		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
1443 		tr->tr_size = lsize;
1444 		list_insert_tail(tr_list, tr);
1445 	} else {
1446 		if (err == EAGAIN) {
1447 			/*
1448 			 * If arc_memory_throttle() detected that pageout
1449 			 * is running and we are low on memory, we delay new
1450 			 * non-pageout transactions to give pageout an
1451 			 * advantage.
1452 			 *
1453 			 * It is unfortunate to be delaying while the caller's
1454 			 * locks are held.
1455 			 */
1456 			txg_delay(dd->dd_pool, tx->tx_txg,
1457 			    MSEC2NSEC(10), MSEC2NSEC(10));
1458 			err = SET_ERROR(ERESTART);
1459 		}
1460 	}
1461 
1462 	if (err == 0) {
1463 		err = dsl_dir_tempreserve_impl(dd, asize, netfree,
1464 		    B_FALSE, tr_list, tx, B_TRUE);
1465 	}
1466 
1467 	if (err != 0)
1468 		dsl_dir_tempreserve_clear(tr_list, tx);
1469 	else
1470 		*tr_cookiep = tr_list;
1471 
1472 	return (err);
1473 }
1474 
1475 /*
1476  * Clear a temporary reservation that we previously made with
1477  * dsl_dir_tempreserve_space().
1478  */
1479 void
1480 dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
1481 {
1482 	int txgidx = tx->tx_txg & TXG_MASK;
1483 	list_t *tr_list = tr_cookie;
1484 	struct tempreserve *tr;
1485 
1486 	ASSERT3U(tx->tx_txg, !=, 0);
1487 
1488 	if (tr_cookie == NULL)
1489 		return;
1490 
1491 	while ((tr = list_remove_head(tr_list)) != NULL) {
1492 		if (tr->tr_ds) {
1493 			mutex_enter(&tr->tr_ds->dd_lock);
1494 			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
1495 			    tr->tr_size);
1496 			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
1497 			mutex_exit(&tr->tr_ds->dd_lock);
1498 		} else {
1499 			arc_tempreserve_clear(tr->tr_size);
1500 		}
1501 		kmem_free(tr, sizeof (struct tempreserve));
1502 	}
1503 
1504 	kmem_free(tr_list, sizeof (list_t));
1505 }
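/*
 * Added usage sketch (illustrative only): the reserve/clear pair
 * brackets the dirtying work for a txg.  On failure the reservation is
 * already cleaned up internally, so only the success path clears:
 *
 *	void *cookie;
 *	int error;
 *
 *	error = dsl_dir_tempreserve_space(dd, lsize, asize, netfree,
 *	    &cookie, tx);
 *	if (error == 0) {
 *		(... dirty data, dsl_dir_willuse_space(), etc ...)
 *		dsl_dir_tempreserve_clear(cookie, tx);
 *	}
 */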
1506 
1507 /*
1508  * This should be called from open context when we think we're going to write
1509  * or free space, for example when dirtying data. Be conservative; it's okay
1510  * to write less space or free more, but we don't want to write more or free
1511  * less than the amount specified.
1512  *
1513  * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
1514  * version; however, it has been adjusted to use an iterative rather than
1515  * recursive algorithm to minimize stack usage.
1516  */
1517 void
1518 dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
1519 {
1520 	int64_t parent_space;
1521 	uint64_t est_used;
1522 
1523 	do {
1524 		mutex_enter(&dd->dd_lock);
1525 		if (space > 0)
1526 			dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
1527 
1528 		est_used = dsl_dir_space_towrite(dd) +
1529 		    dsl_dir_phys(dd)->dd_used_bytes;
1530 		parent_space = parent_delta(dd, est_used, space);
1531 		mutex_exit(&dd->dd_lock);
1532 
1533 		/* Make sure that we clean up dd_space_to* */
1534 		dsl_dir_dirty(dd, tx);
1535 
1536 		dd = dd->dd_parent;
1537 		space = parent_space;
1538 	} while (space && dd);
1539 }
1540 
1541 /* call from syncing context when we actually write/free space for this dd */
1542 void
1543 dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
1544     int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
1545 {
1546 	int64_t accounted_delta;
1547 
1548 	ASSERT(dmu_tx_is_syncing(tx));
1549 	ASSERT(type < DD_USED_NUM);
1550 
1551 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1552 
1553 	/*
1554 	 * dsl_dataset_set_refreservation_sync_impl() calls this with
1555 	 * dd_lock held, so that it can atomically update
1556 	 * ds->ds_reserved and the dsl_dir accounting, so that
1557 	 * dsl_dataset_check_quota() can see dataset and dir accounting
1558 	 * consistently.
1559 	 */
1560 	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
1561 	if (needlock)
1562 		mutex_enter(&dd->dd_lock);
1563 	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
1564 	accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
1565 	ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
1566 	ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
1567 	ASSERT(uncompressed >= 0 ||
1568 	    ddp->dd_uncompressed_bytes >= -uncompressed);
1569 	ddp->dd_used_bytes += used;
1570 	ddp->dd_uncompressed_bytes += uncompressed;
1571 	ddp->dd_compressed_bytes += compressed;
1572 
1573 	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1574 		ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
1575 		ddp->dd_used_breakdown[type] += used;
1576 #ifdef ZFS_DEBUG
1577 		{
1578 			dd_used_t t;
1579 			uint64_t u = 0;
1580 			for (t = 0; t < DD_USED_NUM; t++)
1581 				u += ddp->dd_used_breakdown[t];
1582 			ASSERT3U(u, ==, ddp->dd_used_bytes);
1583 		}
1584 #endif
1585 	}
1586 	if (needlock)
1587 		mutex_exit(&dd->dd_lock);
1588 
1589 	if (dd->dd_parent != NULL) {
1590 		dsl_dir_diduse_transfer_space(dd->dd_parent,
1591 		    accounted_delta, compressed, uncompressed,
1592 		    used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
1593 	}
1594 }
1595 
1596 void
1597 dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
1598     dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
1599 {
1600 	ASSERT(dmu_tx_is_syncing(tx));
1601 	ASSERT(oldtype < DD_USED_NUM);
1602 	ASSERT(newtype < DD_USED_NUM);
1603 
1604 	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
1605 	if (delta == 0 ||
1606 	    !(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
1607 		return;
1608 
1609 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1610 	mutex_enter(&dd->dd_lock);
1611 	ASSERT(delta > 0 ?
1612 	    ddp->dd_used_breakdown[oldtype] >= delta :
1613 	    ddp->dd_used_breakdown[newtype] >= -delta);
1614 	ASSERT(ddp->dd_used_bytes >= ABS(delta));
1615 	ddp->dd_used_breakdown[oldtype] -= delta;
1616 	ddp->dd_used_breakdown[newtype] += delta;
1617 	mutex_exit(&dd->dd_lock);
1618 }
1619 
1620 void
1621 dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
1622     int64_t compressed, int64_t uncompressed, int64_t tonew,
1623     dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
1624 {
1625 	int64_t accounted_delta;
1626 
1627 	ASSERT(dmu_tx_is_syncing(tx));
1628 	ASSERT(oldtype < DD_USED_NUM);
1629 	ASSERT(newtype < DD_USED_NUM);
1630 
1631 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
1632 
1633 	mutex_enter(&dd->dd_lock);
1634 	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
1635 	accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
1636 	ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
1637 	ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
1638 	ASSERT(uncompressed >= 0 ||
1639 	    ddp->dd_uncompressed_bytes >= -uncompressed);
1640 	ddp->dd_used_bytes += used;
1641 	ddp->dd_uncompressed_bytes += uncompressed;
1642 	ddp->dd_compressed_bytes += compressed;
1643 
1644 	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1645 		ASSERT(tonew - used <= 0 ||
1646 		    ddp->dd_used_breakdown[oldtype] >= tonew - used);
1647 		ASSERT(tonew >= 0 ||
1648 		    ddp->dd_used_breakdown[newtype] >= -tonew);
1649 		ddp->dd_used_breakdown[oldtype] -= tonew - used;
1650 		ddp->dd_used_breakdown[newtype] += tonew;
1651 #ifdef ZFS_DEBUG
1652 		{
1653 			dd_used_t t;
1654 			uint64_t u = 0;
1655 			for (t = 0; t < DD_USED_NUM; t++)
1656 				u += ddp->dd_used_breakdown[t];
1657 			ASSERT3U(u, ==, ddp->dd_used_bytes);
1658 		}
1659 #endif
1660 	}
1661 	mutex_exit(&dd->dd_lock);
1662 
1663 	if (dd->dd_parent != NULL) {
1664 		dsl_dir_diduse_transfer_space(dd->dd_parent,
1665 		    accounted_delta, compressed, uncompressed,
1666 		    used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
1667 	}
1668 }
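
/*
 * Note: the function above behaves like dsl_dir_diduse_space()
 * followed by dsl_dir_transfer_space(dd, tonew, oldtype, newtype, tx),
 * folded together so that dd_lock is only taken once; the breakdown
 * updates net out to breakdown[oldtype] += used - tonew and
 * breakdown[newtype] += tonew.
 */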

typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;

static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_prop_predict(ds->ds_dir, "quota",
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_dir->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(ds->ds_dir);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
	    newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
		error = SET_ERROR(ENOSPC);
	}
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
	}

	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = quota;

	return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
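
/*
 * Illustrative usage sketch (names and values hypothetical): setting
 * a 10G quota on tank/home from kernel context would look like
 *
 *	error = dsl_dir_set_quota("tank/home", ZPROP_SRC_LOCAL,
 *	    10ULL << 30);
 *
 * dsl_sync_task() may run dsl_dir_set_quota_check() once in open
 * context, where it is lenient about pending changes, and again in
 * syncing context, where the check is authoritative before
 * dsl_dir_set_quota_sync() applies the new value.
 */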

static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	uint64_t newval, used, avail;
	int error;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);
	dd = ds->ds_dir;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_RESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool,
		    ZFS_SPACE_CHECK_NORMAL) - used;
	}

	if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
		uint64_t delta = MAX(used, newval) -
		    MAX(used, dsl_dir_phys(dd)->dd_reserved);

		if (delta > avail ||
		    (dsl_dir_phys(dd)->dd_quota > 0 &&
		    newval > dsl_dir_phys(dd)->dd_quota))
			error = SET_ERROR(ENOSPC);
	}

	dsl_dataset_rele(ds, FTAG);
	return (error);
}

void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
	dsl_dir_phys(dd)->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}

static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    (longlong_t)newval);
	}

	dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = reservation;

	return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}
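
/*
 * For example, for dirs "tank/a/b" and "tank/a/c" the closest common
 * ancestor is the dir for "tank/a". Two dirs in the same pool always
 * share at least the pool's root dir, so a NULL result is only
 * possible for dirs from different pools.
 */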

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
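
/*
 * Illustrative example (hypothetical numbers): with delta == -1M in a
 * dir whose dd_used_bytes stays below dd_reserved both before and
 * after, parent_delta() returns 0, so none of the freed space becomes
 * visible at any ancestor; the reservation continues to be charged to
 * the parent unchanged.
 */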

typedef struct dsl_dir_rename_arg {
	const char *ddra_oldname;
	const char *ddra_newname;
	cred_t *ddra_cred;
	proc_t *ddra_proc;
} dsl_dir_rename_arg_t;

typedef struct dsl_valid_rename_arg {
	int char_delta;
	int nest_delta;
} dsl_valid_rename_arg_t;

static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	(void) dp;
	dsl_valid_rename_arg_t *dvra = arg;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	dsl_dataset_name(ds, namebuf);

	ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN),
	    <, ZFS_MAX_DATASET_NAME_LEN);
	int namelen = strlen(namebuf) + dvra->char_delta;
	int depth = get_dataset_depth(namebuf) + dvra->nest_delta;

	if (namelen >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting)
		return (SET_ERROR(ENAMETOOLONG));
	return (0);
}

static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	dsl_valid_rename_arg_t dvra;
	dsl_dataset_t *parentds;
	objset_t *parentos;
	const char *mynewname;
	int error;

	/* target dir should exist */
	error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	/* new parent should exist */
	error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
	    &newparent, &mynewname);
	if (error != 0) {
		dsl_dir_rele(dd, FTAG);
		return (error);
	}

	/* can't rename to different pool */
	if (dd->dd_pool != newparent->dd_pool) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/* new name should not already exist */
	if (mynewname == NULL) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	/* can't rename below anything but filesystems (e.g., no ZVOLs) */
	error = dsl_dataset_hold_obj(newparent->dd_pool,
	    dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds);
	if (error != 0) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (error);
	}
	error = dmu_objset_from_ds(parentds, &parentos);
	if (error != 0) {
		dsl_dataset_rele(parentds, FTAG);
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (error);
	}
	if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
		dsl_dataset_rele(parentds, FTAG);
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
	}
	dsl_dataset_rele(parentds, FTAG);

	ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN),
	    <, ZFS_MAX_DATASET_NAME_LEN);
	ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN),
	    <, ZFS_MAX_DATASET_NAME_LEN);
	dvra.char_delta = strlen(ddra->ddra_newname)
	    - strlen(ddra->ddra_oldname);
	dvra.nest_delta = get_dataset_depth(ddra->ddra_newname)
	    - get_dataset_depth(ddra->ddra_oldname);

	/* if the name length is growing, validate child name lengths */
	if (dvra.char_delta > 0 || dvra.nest_delta > 0) {
		error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
		    &dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	if (dmu_tx_is_syncing(tx)) {
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			/*
			 * Although this is the check function and we don't
			 * normally make on-disk changes in check functions,
			 * we need to do that here.
			 *
			 * Ensure this portion of the tree's counts have been
			 * initialized in case the new parent has limits set.
			 */
			dsl_dir_init_fs_ss_count(dd, tx);
		}
	}

	if (newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_reserved);
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		if (dsl_dir_is_zapified(dd)) {
			int err;

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
			    &fs_cnt);
			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);
				return (err);
			}

			/*
			 * have to add 1 for the filesystem itself that we're
			 * moving
			 */
			fs_cnt++;

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
			    &ss_cnt);
			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);
				return (err);
			}
		}

		/* check for encryption errors */
		error = dsl_dir_rename_crypt_check(dd, newparent);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EACCES));
		}

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, newparent) == dd) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = dsl_dir_transfer_possible(dd->dd_parent,
		    newparent, fs_cnt, ss_cnt, myspace,
		    ddra->ddra_cred, ddra->ddra_proc);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
	return (0);
}

static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;
	objset_t *mos = dp->dp_meta_objset;

	VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
	VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
	    &mynewname));

	ASSERT3P(mynewname, !=, NULL);

	/* Log this before we change the name. */
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s", ddra->ddra_newname);

	if (newparent != dd->dd_parent) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		/*
		 * We already made sure the dd counts were initialized in the
		 * check function.
		 */
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
			    &fs_cnt));
			/* add 1 for the filesystem itself that we're moving */
			fs_cnt++;

			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
			    &ss_cnt));
		}

		dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

		dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dsl_dir_phys(dd)->dd_used_bytes,
		    -dsl_dir_phys(dd)->dd_compressed_bytes,
		    -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(newparent, DD_USED_CHILD,
		    dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_compressed_bytes,
		    dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);

		if (dsl_dir_phys(dd)->dd_reserved >
		    dsl_dir_phys(dd)->dd_used_bytes) {
			uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
			    dsl_dir_phys(dd)->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	(void) strlcpy(dd->dd_myname, mynewname,
	    sizeof (dd->dd_myname));
	dsl_dir_rele(dd->dd_parent, dd);
	dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
	VERIFY0(dsl_dir_hold_obj(dp,
	    newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx));

	/* TODO: A rename callback to avoid these layering violations. */
	zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
	zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
	    ddra->ddra_newname, B_TRUE);

	dsl_prop_notify_all(dd);

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
}

int
dsl_dir_rename(const char *oldname, const char *newname)
{
	dsl_dir_rename_arg_t ddra;

	ddra.ddra_oldname = oldname;
	ddra.ddra_newname = newname;
	ddra.ddra_cred = CRED();
	ddra.ddra_proc = curproc;

	return (dsl_sync_task(oldname,
	    dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
	    3, ZFS_SPACE_CHECK_RESERVED));
}
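
/*
 * Illustrative usage (names hypothetical):
 *
 *	error = dsl_dir_rename("tank/a/fs", "tank/b/fs");
 *
 * reparents fs from tank/a to tank/b. The check function rejects,
 * among other things, cross-pool renames (EXDEV), an existing
 * destination name (EEXIST), and renaming a dir underneath its own
 * descendant (EINVAL).
 */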

int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
    uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space,
    cred_t *cr, proc_t *proc)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;
	int err;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (SET_ERROR(ENOSPC));

	err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
	    ancestor, cr, proc);
	if (err != 0)
		return (err);
	err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
	    ancestor, cr, proc);
	if (err != 0)
		return (err);

	return (0);
}
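
/*
 * Note on the space check above: adelta is how much of the source's
 * space drop (-space) would actually be visible at the closest common
 * ancestor, since reservations along the way can absorb part of it.
 * Feeding that delta to dsl_dir_space_available() keeps space that is
 * shared above the ancestor from being counted as both freed at the
 * source and available at the target.
 */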

inode_timespec_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	inode_timespec_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}

void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	inode_timespec_t t;
	gethrestime(&t);

	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	if (spa_feature_is_enabled(dp->dp_spa,
	    SPA_FEATURE_EXTENSIBLE_DATASET)) {
		objset_t *mos = dd->dd_pool->dp_meta_objset;
		uint64_t ddobj = dd->dd_object;
		dsl_dir_zapify(dd, tx);
		VERIFY0(zap_update(mos, ddobj,
		    DD_FIELD_SNAPSHOTS_CHANGED,
		    sizeof (uint64_t),
		    sizeof (inode_timespec_t) / sizeof (uint64_t),
		    &t, tx));
	}
	mutex_exit(&dd->dd_lock);
}

void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}

boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
	dmu_object_info_t doi;

	dmu_object_info_from_db(dd->dd_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
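
/*
 * A dsl_dir starts life as a plain DMU_OT_DSL_DIR object;
 * dsl_dir_zapify() converts it in place to a ZAP so that extensible
 * fields (e.g. DD_FIELD_FILESYSTEM_COUNT or DD_FIELD_LIVELIST) can be
 * stored on it. Writers may zapify before updating, as
 * dsl_dir_snap_cmtime_update() does above, while read-only paths such
 * as dsl_dir_rename_check() test dsl_dir_is_zapified() before doing
 * ZAP lookups, since the object may not be a ZAP yet.
 */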

int
dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa,
	    SPA_FEATURE_LIVELIST));
	int err = dsl_deadlist_open(&dd->dd_livelist, mos, obj);
	if (err != 0)
		return (err);
	bplist_create(&dd->dd_pending_allocs);
	bplist_create(&dd->dd_pending_frees);
	return (0);
}

void
dsl_dir_livelist_close(dsl_dir_t *dd)
{
	dsl_deadlist_close(&dd->dd_livelist);
	bplist_destroy(&dd->dd_pending_allocs);
	bplist_destroy(&dd->dd_pending_frees);
}

void
dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total)
{
	uint64_t obj;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	livelist_condense_entry_t to_condense = spa->spa_to_condense;

	if (!dsl_deadlist_is_open(&dd->dd_livelist))
		return;

	/*
	 * If the livelist being removed is set to be condensed, stop the
	 * condense zthr and indicate the cancellation in the
	 * spa_to_condense struct, in case the condense no-wait synctask
	 * has already started.
	 */
	zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
	if (ll_condense_thread != NULL &&
	    (to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) {
		/*
		 * We use zthr_wait_cycle_done instead of zthr_cancel
		 * because we don't want to destroy the zthr, just have
		 * it skip its current task.
		 */
		spa->spa_to_condense.cancelled = B_TRUE;
		zthr_wait_cycle_done(ll_condense_thread);
		/*
		 * If we've returned from zthr_wait_cycle_done without
		 * clearing the to_condense data structure, it is either
		 * because the no-wait synctask has started (indicated by
		 * the 'syncing' field of to_condense), in which case we
		 * can expect it to clear to_condense on its own, or
		 * because we returned before the zthr ran. In the latter
		 * case, the checkfunc will now fail as cancelled == B_TRUE,
		 * so we can safely NULL out ds, allowing a different
		 * dir's livelist to be condensed.
		 *
		 * We can be sure that the to_condense struct will not
		 * be repopulated at this stage because both this
		 * function and dsl_livelist_try_condense execute in
		 * syncing context.
		 */
		if ((spa->spa_to_condense.ds != NULL) &&
		    !spa->spa_to_condense.syncing) {
			dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf,
			    spa);
			spa->spa_to_condense.ds = NULL;
		}
	}

	dsl_dir_livelist_close(dd);
	VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj));
	VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_LIVELIST, tx));
	if (total) {
		dsl_deadlist_free(dp->dp_meta_objset, obj, tx);
		spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
	}
}

static int
dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds,
    zfs_wait_activity_t activity, boolean_t *in_progress)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&dd->dd_activity_lock));

	switch (activity) {
	case ZFS_WAIT_DELETEQ: {
#ifdef _KERNEL
		objset_t *os;
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0)
			break;

		mutex_enter(&os->os_user_ptr_lock);
		void *user = dmu_objset_get_user(os);
		mutex_exit(&os->os_user_ptr_lock);
		if (dmu_objset_type(os) != DMU_OST_ZFS ||
		    user == NULL || zfs_get_vfs_flag_unmounted(os)) {
			*in_progress = B_FALSE;
			return (0);
		}

		uint64_t readonly = B_FALSE;
		error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly,
		    NULL);

		if (error != 0)
			break;

		if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) {
			*in_progress = B_FALSE;
			return (0);
		}

		uint64_t count, unlinked_obj;
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
		    &unlinked_obj);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			break;
		}
		error = zap_count(os, unlinked_obj, &count);

		if (error == 0)
			*in_progress = (count != 0);
		break;
#else
		/*
		 * The delete queue is ZPL specific, and libzpool doesn't have
		 * it. It doesn't make sense to wait for it.
		 */
		(void) ds;
		*in_progress = B_FALSE;
		break;
#endif
	}
	default:
		panic("unrecognized value for activity %d", activity);
	}

	return (error);
}

int
dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
    boolean_t *waited)
{
	int error = 0;
	boolean_t in_progress;
	dsl_pool_t *dp = dd->dd_pool;
	for (;;) {
		dsl_pool_config_enter(dp, FTAG);
		error = dsl_dir_activity_in_progress(dd, ds, activity,
		    &in_progress);
		dsl_pool_config_exit(dp, FTAG);
		if (error != 0 || !in_progress)
			break;

		*waited = B_TRUE;

		if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
		    0 || dd->dd_activity_cancelled) {
			error = SET_ERROR(EINTR);
			break;
		}
	}
	return (error);
}

void
dsl_dir_cancel_waiters(dsl_dir_t *dd)
{
	mutex_enter(&dd->dd_activity_lock);
	dd->dd_activity_cancelled = B_TRUE;
	cv_broadcast(&dd->dd_activity_cv);
	while (dd->dd_activity_waiters > 0)
		cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
	mutex_exit(&dd->dd_activity_lock);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);
#endif

ZFS_MODULE_PARAM(zfs, , zvol_enforce_quotas, INT, ZMOD_RW,
	"Enable strict ZVOL quota enforcement");