/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"

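/*
 * A dsl_dir_t ("DSL directory") is a node in the dataset namespace
 * hierarchy.  It tracks its parent and children (via ZAP objects in the
 * MOS), carries the per-directory properties, and is where quota,
 * reservation, and used-space accounting are enforced and rolled up to
 * ancestors.
 */
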
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx);


/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

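/*
 * Open (or instantiate) the dsl_dir_t with the given MOS object number.
 * If tail is non-NULL it supplies the directory's name; otherwise the
 * name is recovered by searching the parent's child-dir ZAP.  On success
 * the caller holds the dir with `tag' and must release it with
 * dsl_dir_close().
 */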
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}

void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

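/*
 * Copy the next component of path (up to the next '/' or '@' separator)
 * into component, and set *nextp to the remainder.  For example, given
 * "a/b@s", the first call yields component "a" and *nextp "b@s"; the
 * next call yields "b" and leaves *nextp at "@s" (an '@' component must
 * be last).
 */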
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if ((path == NULL) || (path[0] == '\0'))
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}

/*
 * Same as dsl_dir_open(), except that the spa may be supplied by the
 * caller, in which case the first component of name just identifies
 * the pool.
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t in *ddp, and in *tailp the last component that
 * couldn't be found.  Return an error if the path is bogus, or if
 * tailp==NULL and we couldn't parse the whole name.  (*tailp)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}

uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds)
		ddphys->dd_parent_obj = pds->dd_object;
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_prop_setarg_t psa;
	uint64_t value = 0;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	dsl_prop_setarg_init_uint64(&psa, "reservation",
	    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
	    &value);
	psa.psa_effective_value = 0;	/* predict default value */

	dsl_dir_set_reservation_sync(ds, &psa, tx);

	ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

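/*
 * A dir is a clone if it has an origin snapshot, and that origin is not
 * the pool-wide $ORIGIN snapshot (which every dataset references on
 * pools that support it).
 */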
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

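/*
 * How much of a change in used space must be pushed up to the parent?
 * Usage below dd_reserved is already charged to the parent, so only the
 * portion above the reservation propagates.  E.g. with dd_reserved = 10M
 * and used = 4M, a delta of +2M changes nothing at the parent
 * (MAX(4M, 10M) == MAX(6M, 10M)), while a delta of +8M charges the
 * parent 2M (12M - 10M).
 */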
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

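/*
 * One record per reservation taken, so each can be undone by
 * dsl_dir_tempreserve_clear(): tr_dp set means a pool-wide reservation,
 * tr_ds set means a per-directory reservation, and neither set means an
 * ARC reservation.
 */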
struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

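/*
 * Reserve asize bytes in this dir for tx's txg, checking the dir's
 * quota (and, on the first call, the dataset's refquota), then recurse
 * to reserve the parent's share.  Each level's reservation is appended
 * to tr_list so a failure partway up can be unwound.
 */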
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (retval);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg, 1);
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

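/*
 * A hedged sketch of how the tempreserve interface is used (dmu_tx.c is
 * the real consumer; the variable names here are illustrative only):
 *
 *	void *tr_cookie;
 *	err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize, usize,
 *	    &tr_cookie, tx);
 *	if (err == 0) {
 *		... dirty the data in this txg ...
 *		dsl_dir_tempreserve_clear(tr_cookie, tx);
 *	} else if (err == ERESTART) {
 *		... back off and retry with a new transaction ...
 *	}
 */
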
/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}

/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dsl_dir_dirty(dd, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dsl_dir_dirty(dd, tx);
	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}

static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	int err;
	uint64_t towrite;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (psa->psa_effective_value < dd->dd_phys->dd_reserved ||
	    psa->psa_effective_value < dd->dd_phys->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

extern dsl_syncfunc_t dsl_prop_set_sync;

static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = effective_value;
	mutex_exit(&dd->dd_lock);

	spa_history_log_internal(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, "%lld dataset = %llu ",
	    (longlong_t)effective_value, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "quota", source, &quota);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	/*
	 * If someone removes a file, then tries to set the quota, we want to
	 * make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t used, avail;
	int err;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, effective_value) > MAX(used, dd->dd_phys->dd_reserved)) {
		uint64_t delta = MAX(used, effective_value) -
		    MAX(used, dd->dd_phys->dd_reserved);

		if (delta > avail)
			return (ENOSPC);
		if (dd->dd_phys->dd_quota > 0 &&
		    effective_value > dd->dd_phys->dd_quota)
			return (ENOSPC);
	}

	return (0);
}

static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t used;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, effective_value) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = effective_value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);

	spa_history_log_internal(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "reservation", source, &reservation);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

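/*
 * Walk up from ds1, comparing each of its ancestors against every
 * ancestor of ds2; the first match is the closest common ancestor
 * (at worst the pool's root dir, when both are in the same pool).
 */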
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};

static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/*
	 * There should only be one reference, from dmu_objset_rename().
	 * Fleeting holds are also possible (eg, from "zfs list" getting
	 * stats), but any that are present in open context will likely
	 * be gone by syncing context, so only fail from syncing
	 * context.
	 */
	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 1)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

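/*
 * Could `space' bytes move from sdd to tdd?  Compute what the removal
 * would change at their closest common ancestor, then check whether the
 * destination would have that much available.
 */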
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}

timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}

void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}
1417