/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx);

/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	ASSERT3U(dd->dd_used_bytes, ==, dd->dd_phys->dd_used_bytes);

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should be empty since they hold the
	 * dir open.
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
	}
#endif
	/* XXX assert bonus buffer size is correct */
	if (dd == NULL) {
		dsl_dir_t *winner;
		int err;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		dd->dd_used_bytes = dd->dd_phys->dd_used_bytes;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err) {
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err) {
				dsl_dir_close(dd->dd_parent, dd);
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);
}

void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}
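
/*
 * Usage sketch (illustrative only): callers pair dsl_dir_open_obj()
 * with dsl_dir_close() using the same tag, holding the pool's
 * config_rwlock (or being in syncing context) across the open:
 *
 *	dsl_dir_t *dd;
 *	rw_enter(&dp->dp_config_rwlock, RW_READER);
 *	err = dsl_dir_open_obj(dp, ddobj, NULL, FTAG, &dd);
 *	rw_exit(&dp->dp_config_rwlock);
 *	if (err == 0) {
 *		... use dd ...
 *		dsl_dir_close(dd, FTAG);
 *	}
 */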

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

/* Calculate the name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

int
dsl_dir_is_private(dsl_dir_t *dd)
{
	int rv = FALSE;

	if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent))
		rv = TRUE;
	if (dataset_name_hidden(dd->dd_myname))
		rv = TRUE;
	return (rv);
}

static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if (path == NULL)
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}
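
/*
 * Worked example (illustrative): repeatedly calling getcomponent()
 * on "pool/a/b@snap" yields
 *
 *	component = "pool",	*nextp = "a/b@snap"
 *	component = "a",	*nextp = "b@snap"
 *	component = "b",	*nextp = "@snap"
 *	component = "@snap",	*nextp = NULL
 *
 * Note that a '/' separator is consumed but an '@' separator is left
 * in place, so the caller can recognize a snapshot component.
 */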

/*
 * Same as dsl_dir_open(), but ignore the first component of name and
 * use the given spa instead.
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  Return an error if the path is bogus, or if
 * tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}

uint64_t
dsl_dir_create_sync(dsl_dir_t *pds, const char *name, dmu_tx_t *tx)
{
	objset_t *mos = pds->dd_pool->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *dsphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (uint64_t), 1, &ddobj, tx));
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;

	dsphys->dd_creation_time = gethrestime_sec();
	dsphys->dd_parent_obj = pds->dd_object;
	dsphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val, obj;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	val = 0;
	dsl_dir_set_reservation_sync(dd, &val, cr, tx);
	ASSERT3U(dd->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

void
dsl_dir_create_root(objset_t *mos, uint64_t *ddobjp, dmu_tx_t *tx)
{
	dsl_dir_phys_t *dsp;
	dmu_buf_t *dbuf;
	int error;

	*ddobjp = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);

	error = zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET,
	    sizeof (uint64_t), 1, ddobjp, tx);
	ASSERT3U(error, ==, 0);

	VERIFY(0 == dmu_bonus_hold(mos, *ddobjp, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsp = dbuf->db_data;

	dsp->dd_creation_time = gethrestime_sec();
	dsp->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsp->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);

	dmu_buf_rele(dbuf, FTAG);
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, dd->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dd->dd_phys->dd_origin_obj) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj,
		    NULL, DS_MODE_NONE, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}
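
/*
 * Note on the compressratio computed above (illustrative): the value
 * is the ratio scaled by 100, so 300 MB of uncompressed data stored in
 * 100 MB on disk reports 300 (a 3.00x ratio), and a dir with no
 * compressed bytes reports 100 (1.00x).
 */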

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}
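
/*
 * Worked example for parent_delta() (illustrative): with
 * dd_reserved == 100, used == 90 and delta == +20, the parent was
 * being charged MAX(90, 100) == 100 and will now be charged
 * MAX(110, 100) == 110, so the parent sees a delta of +10.  A change
 * that stays entirely below the reservation floor yields 0.
 */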

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	dd->dd_phys->dd_used_bytes = dd->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);
	if (dd == ancestor)
		used += delta;

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;

		/*
		 * While it's OK to be a little over quota, if
		 * we think we are using more space than there
		 * is in the pool (which is already 1.6% more than
		 * dsl_pool_adjustedsize()), something is very
		 * wrong.
		 */
		ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}
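
/*
 * Worked example (illustrative): with quota == 1000, used == 400 and
 * parentspace == 250, the quota leaves 600 but the parent can only
 * provide 250, so 250 is returned.  If the dir also had
 * dd_reserved == 500, the 100 bytes of unused reservation would first
 * be added to parentspace, giving MIN(350, 600) == 350.
 */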

struct tempreserve {
	list_node_t tr_node;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	struct tempreserve *tr;
	int enospc = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_used_bytes;

	/*
	 * Check for dataset reference quota on first iteration.
	 */
	if (list_head(tr_list) == NULL && tx->tx_objset) {
		int error;

		dsl_dataset_t *ds = tx->tx_objset->os->os_dsl_dataset;
		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		if (poolsize < quota) {
			quota = poolsize;
			enospc = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight > quota) {
		if (est_inflight > 0 || used_on_disk < quota)
			enospc = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, enospc);
		mutex_exit(&dd->dd_lock);
		return (enospc);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight, asize);
	mutex_exit(&dd->dd_lock);

	tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx));
	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err = 0;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize, FALSE,
	    asize > usize, tr_list, tx);

	if (err == 0) {
		struct tempreserve *tr;

		err = arc_tempreserve_space(lsize);
		if (err == 0) {
			tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
			tr->tr_ds = NULL;
			tr->tr_size = lsize;
			list_insert_tail(tr_list, tr);
		}
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;
	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_ds == NULL) {
			arc_tempreserve_clear(tr->tr_size);
		} else {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}
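
/*
 * Usage sketch for the tempreserve interfaces (illustrative only;
 * sizes and error handling elided):
 *
 *	void *tr_cookie;
 *	err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize, usize,
 *	    &tr_cookie, tx);
 *	if (err == 0) {
 *		... dirty the data, dsl_dir_willuse_space(), etc ...
 *		dsl_dir_tempreserve_clear(tr_cookie, tx);
 *	}
 *
 * ERESTART means the caller may try again; EDQUOT/ENOSPC are final.
 */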

/*
 * Call in open context when we think we're going to write/free space,
 * e.g. when dirtying data.  Be conservative (i.e. OK to write less
 * than this or free more than this, but don't write more or free
 * less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_dirty(dd, tx);

	mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed, tx);
	}
}

static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;
	int err = 0;
	uint64_t towrite;

	if (new_quota == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (new_quota < dd->dd_phys->dd_reserved ||
	    new_quota < dd->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

/* ARGSUSED */
static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = new_quota;
	mutex_exit(&dd->dd_lock);

	spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu ",
	    (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_quota(const char *ddname, uint64_t quota)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);

	if (quota != dd->dd_phys->dd_quota) {
		/*
		 * If someone removes a file, then tries to set the quota, we
		 * want to make sure the file freeing takes effect.
		 */
		txg_wait_open(dd->dd_pool, 0);

		err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
		    dsl_dir_set_quota_sync, dd, &quota, 0);
	}
	dsl_dir_close(dd, FTAG);
	return (err);
}

int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used, avail;
	int64_t delta;

	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (delta > 0 && delta > avail)
		return (ENOSPC);
	if (delta > 0 && dd->dd_phys->dd_quota > 0 &&
	    new_reservation > dd->dd_phys->dd_quota)
		return (ENOSPC);
	return (0);
}

/* ARGSUSED */
static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = new_reservation;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, delta, 0, 0, tx);
	}

	spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu",
	    (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, dd, &reservation, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}

static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}
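
/*
 * Example (illustrative): for the dirs backing "pool/a/b" and
 * "pool/a/c" the closest common ancestor is the dir for "pool/a"; for
 * "pool" and any of its descendants it is the root dir itself.
 */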

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
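
/*
 * Example (illustrative): would_change(dd, -100, ancestor) asks how
 * much of a 100-byte free at dd would be visible at ancestor.  If some
 * intermediate dir's reservation already exceeds its usage,
 * parent_delta() clamps the change to 0 there and nothing propagates
 * further up.
 */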

struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};

/*ARGSUSED*/
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/* There should be 2 references: the open and the dirty */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		dsl_dir_diduse_space(dd->dd_parent, -myspace,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, myspace,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}