// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 Martin Matuska. All rights reserved.
 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2025, Rob Norris <robn@despairlabs.com>
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <sys/zthr.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"

/*
 * This controls if we verify the ZVOL quota or not.
 * Currently, quotas are not implemented for ZVOLs.
 * The quota size is the size of the ZVOL.
 * The size of the volume already implies the ZVOL size quota.
 * The quota mechanism can introduce a significant performance drop.
 */
static int zvol_enforce_quotas = B_TRUE;

/*
 * Filesystem and Snapshot Limits
 * ------------------------------
 *
 * These limits are used to restrict the number of filesystems and/or snapshots
 * that can be created at a given level in the tree or below. A typical
 * use-case is with a delegated dataset where the administrator wants to ensure
 * that a user within the zone is not creating too many additional filesystems
 * or snapshots, even though they're not exceeding their space quota.
 *
 * The filesystem and snapshot counts are stored as extensible properties. This
 * capability is controlled by a feature flag and must be enabled to be used.
 * Once enabled, the feature is not active until the first limit is set. At
 * that point, future operations to create/destroy filesystems or snapshots
 * will validate and update the counts.
 *
 * Because the count properties will not exist before the feature is active,
 * the counts are updated when a limit is first set on an uninitialized
 * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
 * all of the nested filesystems/snapshots. Thus, a new leaf node has a
 * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
 * snapshot count properties on a node indicate uninitialized counts on that
 * node.) When first setting a limit on an uninitialized node, the code starts
 * at the filesystem with the new limit and descends into all sub-filesystems
 * to add the count properties.
 *
 * In practice this is lightweight since a limit is typically set when the
 * filesystem is created and thus has no children. Once valid, changing the
 * limit value won't require a re-traversal since the counts are already valid.
 * When recursively fixing the counts, if a node with a limit is encountered
 * during the descent, the counts are known to be valid and there is no need to
 * descend into that filesystem's children. The counts on filesystems above the
 * one with the new limit will still be uninitialized, unless a limit is
 * eventually set on one of those filesystems. The counts are always
 * recursively updated when a limit is set on a dataset, unless there is
 * already a limit. When a new limit value is set on a filesystem with an
 * existing limit, it is possible for the new limit to be less than the current
 * count at that level since a user who can change the limit is also allowed to
 * exceed the limit.
 *
 * Once the feature is active, then whenever a filesystem or snapshot is
 * created, the code recurses up the tree, validating the new count against the
 * limit at each initialized level. In practice, most levels will not have a
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
 * the initialized nodes in the tree. Renaming a filesystem into a different
 * point in the tree will first validate, then update the counts on each branch
 * up to the common ancestor. A receive will also validate the counts and then
 * update them.
 *
 * An exception to the above behavior is that the limit is not enforced if the
 * user has permission to modify the limit. This is primarily so that
 * recursive snapshots in the global zone always work. We want to prevent a
 * denial-of-service in which a lower level delegated dataset could max out its
 * limit and thus block recursive snapshots from being taken in the global
 * zone. Because of this, it is possible for the snapshot count to be over the
 * limit and snapshots taken in the global zone could cause a lower level
 * dataset to hit or exceed its limit. The administrator taking the global zone
 * recursive snapshot should be aware of this side-effect and behave
 * accordingly. For consistency, the filesystem limit is also not enforced if
 * the user can modify the limit.
 *
 * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
 * and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
 * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
 * dsl_dir_init_fs_ss_count().
 */

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);

typedef struct ddulrt_arg {
	dsl_dir_t	*ddulrta_dd;
	uint64_t	ddlrta_txg;
} ddulrt_arg_t;

static void
dsl_dir_evict_async(void *dbu)
{
	dsl_dir_t *dd = dbu;
	int t;
	dsl_pool_t *dp __maybe_unused = dd->dd_pool;

	dd->dd_dbuf = NULL;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_async_rele(dd->dd_parent, dd);

	spa_async_close(dd->dd_pool->dp_spa, dd);

	if (dsl_deadlist_is_open(&dd->dd_livelist))
		dsl_dir_livelist_close(dd);

	dsl_prop_fini(dd);
	cv_destroy(&dd->dd_activity_cv);
	mutex_destroy(&dd->dd_activity_lock);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, const void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	dmu_object_info_t doi;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err != 0)
		return (err);
	dd = dmu_buf_get_user(dbuf);

	dmu_object_info_from_db(dbuf, &doi);
	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));

	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;

		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
		dsl_prop_init(dd);

		if (dsl_dir_is_zapified(dd)) {
			err = zap_lookup(dp->dp_meta_objset,
			    ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
			    sizeof (uint64_t), 1, &dd->dd_crypto_obj);
			if (err == 0) {
				/* check for on-disk format errata */
				if (dsl_dir_incompatible_encryption_version(
				    dd)) {
					dp->dp_spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
				}
			} else if (err != ENOENT) {
				goto errout;
			}
		}

		if (dsl_dir_phys(dd)->dd_parent_obj) {
			err = dsl_dir_hold_obj(dp,
			    dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
			    &dd->dd_parent);
			if (err != 0)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj, tail,
				    sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strlcpy(dd->dd_myname, tail,
				    sizeof (dd->dd_myname));
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname,
				    sizeof (dd->dd_myname));
			}
			if (err != 0)
				goto errout;
		} else {
			(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
			    sizeof (dd->dd_myname));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dsl_dir_phys(dd)->dd_origin_obj, FTAG,
			    &origin_bonus);
			if (err != 0)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
			if (dsl_dir_is_zapified(dd)) {
				uint64_t obj;
				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_object, DD_FIELD_LIVELIST,
				    sizeof (uint64_t), 1, &obj);
				if (err == 0) {
					err = dsl_dir_livelist_open(dd, obj);
					if (err != 0)
						goto errout;
				} else if (err != ENOENT)
					goto errout;
			}
		}

		if (dsl_dir_is_zapified(dd)) {
			inode_timespec_t t = {0};
			(void) zap_lookup(dp->dp_meta_objset, ddobj,
			    DD_FIELD_SNAPSHOTS_CHANGED,
			    sizeof (uint64_t),
			    sizeof (inode_timespec_t) / sizeof (uint64_t),
			    &t);
			dd->dd_snap_cmtime = t;
		}

		dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
		    &dd->dd_dbuf);
		winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
		if (winner != NULL) {
			if (dd->dd_parent)
				dsl_dir_rele(dd->dd_parent, dd);
			if (dsl_deadlist_is_open(&dd->dd_livelist))
				dsl_dir_livelist_close(dd);
			dsl_prop_fini(dd);
			cv_destroy(&dd->dd_activity_cv);
			mutex_destroy(&dd->dd_activity_lock);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa. We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool. We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);
	if (dsl_deadlist_is_open(&dd->dd_livelist))
		dsl_dir_livelist_close(dd);
	dsl_prop_fini(dd);
	cv_destroy(&dd->dd_activity_cv);
	mutex_destroy(&dd->dd_activity_lock);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}

void
dsl_dir_rele(dsl_dir_t *dd, const void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/*
 * Remove a reference to the given dsl dir that is being asynchronously
 * released. Async releases occur from a taskq performing eviction of
 * dsl datasets and dirs. This process is identical to a normal release
 * with the exception of using the async API for releasing the reference on
 * the spa.
 */
void
dsl_dir_async_rele(dsl_dir_t *dd, const void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_async_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
		    ZFS_MAX_DATASET_NAME_LEN);
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
		    <, ZFS_MAX_DATASET_NAME_LEN);
		mutex_exit(&dd->dd_lock);
	} else {
		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
		    <, ZFS_MAX_DATASET_NAME_LEN);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (SET_ERROR(ENOENT));
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (SET_ERROR(EINVAL));
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (SET_ERROR(EINVAL));
		if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
		p = NULL;
	} else if (p[0] == '/') {
		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strlcpy(component, path, p - path + 1);
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (SET_ERROR(EINVAL));
		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strlcpy(component, path, p - path + 1);
	} else {
		panic("invalid p=%p", (void *)p);
	}
	*nextp = p;
	return (0);
}

/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail. The name must be in the specified dsl_pool_t. This
 * thread must hold the dp_config_rwlock for the pool. Returns NULL if the
 * path is bogus, or if tail==NULL and we couldn't parse the whole name.
 * (*tail)[0] == '@' means that the last component is a snapshot.
 */
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char *buf;
	const char *spaname, *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	uint64_t ddobj;

	buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	err = getcomponent(name, buf, &next);
	if (err != 0)
		goto error;

	/* Make sure the name is in the specified pool. */
	spaname = spa_name(dp->dp_spa);
	if (strcmp(buf, spaname) != 0) {
		err = SET_ERROR(EXDEV);
		goto error;
	}

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err != 0) {
		goto error;
	}

	while (next != NULL) {
		dsl_dir_t *child_dd;
		err = getcomponent(next, buf, &nextnext);
		if (err != 0)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err != 0) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
		if (err != 0)
			break;
		dsl_dir_rele(dd, tag);
		dd = child_dd;
		next = nextnext;
	}

	if (err != 0) {
		dsl_dir_rele(dd, tag);
		goto error;
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_rele(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = SET_ERROR(ENOENT);
	}
	if (tailp != NULL)
		*tailp = next;
	if (err == 0)
		*ddp = dd;
error:
	kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
	return (err);
}

/*
 * If the counts are already initialized for this filesystem and its
 * descendants then do nothing, otherwise initialize the counts.
 *
 * The counts on this filesystem, and those below, may be uninitialized due to
 * either the use of a pre-existing pool which did not support the
 * filesystem/snapshot limit feature, or one in which the feature had not yet
 * been enabled.
 *
 * Recursively descend the filesystem tree and update the filesystem/snapshot
 * counts on each filesystem below, then update the cumulative count on the
 * current filesystem. If the filesystem already has a count set on it,
 * then we know that its counts, and the counts on the filesystems below it,
 * are already correct, so we don't have to update this filesystem.
 */
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
	uint64_t my_fs_cnt = 0;
	uint64_t my_ss_cnt = 0;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *os = dp->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;
	dsl_dataset_t *ds;

	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
	ASSERT(dsl_pool_config_held(dp));
	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_zapify(dd, tx);

	/*
	 * If the filesystem count has already been initialized then we
	 * don't need to recurse down any further.
	 */
	if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = zap_attribute_alloc();

	/* Iterate my child dirs */
	for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
	    zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
		dsl_dir_t *chld_dd;
		uint64_t count;

		VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
		    &chld_dd));

		/*
		 * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
		 */
		if (chld_dd->dd_myname[0] == '$') {
			dsl_dir_rele(chld_dd, FTAG);
			continue;
		}

		my_fs_cnt++;	/* count this child */

		dsl_dir_init_fs_ss_count(chld_dd, tx);

		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
		my_fs_cnt += count;
		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
		my_ss_cnt += count;

		dsl_dir_rele(chld_dd, FTAG);
	}
	zap_cursor_fini(zc);
	/* Count my snapshots (we counted children's snapshots above) */
	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
	    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));

	for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		/* Don't count temporary snapshots */
		if (za->za_name[0] != '%')
			my_ss_cnt++;
	}
	zap_cursor_fini(zc);

	dsl_dataset_rele(ds, FTAG);

	kmem_free(zc, sizeof (zap_cursor_t));
	zap_attribute_free(za);

	/* we're in a sync task, update counts */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
	    sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
	    sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}

static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	int error;

	error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
	if (error != 0)
		return (error);

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	dd = ds->ds_dir;
	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
	    dsl_dir_is_zapified(dd) &&
	    zap_contains(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EALREADY));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	spa_t *spa;

	VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));

	spa = dsl_dataset_get_spa(ds);

	if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
		/*
		 * Since the feature was not active and we're now setting a
		 * limit, increment the feature-active counter so that the
		 * feature becomes active for the first time.
		 *
		 * We are already in a sync task so we can update the MOS.
		 */
		spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
	}

	/*
	 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
	 * we need to ensure the counts are correct. Descend down the tree from
	 * this point and update all of the counts to be accurate.
	 */
	dsl_dir_init_fs_ss_count(ds->ds_dir, tx);

	dsl_dataset_rele(ds, FTAG);
}

/*
 * Make sure the feature is enabled and activate it if necessary.
 * Since we're setting a limit, ensure the on-disk counts are valid.
 * This is only called by the ioctl path when setting a limit value.
 *
 * We do not need to validate the new limit, since users who can change the
 * limit are also allowed to exceed the limit.
 */
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
	int error;

	error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
	    dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
	    ZFS_SPACE_CHECK_RESERVED);

	if (error == EALREADY)
		error = 0;

	return (error);
}

/*
 * Used to determine if the filesystem_limit or snapshot_limit should be
 * enforced. We allow the limit to be exceeded if the user has permission to
 * write the property value. We pass in the creds that we got in the open
 * context since we will always be the GZ root in syncing context. We also have
 * to handle the case where we are allowed to change the limit on the current
 * dataset, but there may be another limit in the tree above.
 *
 * We can never modify these two properties within a non-global zone. In
 * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
 * can't use that function since we are already holding the dp_config_rwlock.
 * In addition, we already have the dd and dealing with snapshots is simplified
 * in this code.
 */

typedef enum {
	ENFORCE_ALWAYS,
	ENFORCE_NEVER,
	ENFORCE_ABOVE
} enforce_res_t;

static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
    cred_t *cr)
{
	enforce_res_t enforce = ENFORCE_ALWAYS;
	uint64_t obj;
	dsl_dataset_t *ds;
	uint64_t zoned;
	const char *zonedstr;

	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

#ifdef _KERNEL
	if (crgetzoneid(cr) != GLOBAL_ZONEID)
		return (ENFORCE_ALWAYS);

	if (secpolicy_zfs(cr) == 0)
		return (ENFORCE_NEVER);
#endif

	if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
		return (ENFORCE_ALWAYS);

	ASSERT(dsl_pool_config_held(dd->dd_pool));

	if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
		return (ENFORCE_ALWAYS);

	zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
	if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
		/* Only root can access zoned fs's from the GZ */
		enforce = ENFORCE_ALWAYS;
	} else {
		if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
			enforce = ENFORCE_ABOVE;
	}

	dsl_dataset_rele(ds, FTAG);
	return (enforce);
}

/*
 * Check if adding additional child filesystem(s) would exceed any filesystem
 * limits or adding additional snapshot(s) would exceed any snapshot limits.
 * The prop argument indicates which limit to check.
 *
 * Note that all filesystem limits up to the root (or the highest
 * initialized) filesystem or the given ancestor must be satisfied.
 */
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
    dsl_dir_t *ancestor, cred_t *cr)
{
	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t limit, count;
	const char *count_prop;
	enforce_res_t enforce;
	int err = 0;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

	if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
		/*
		 * We don't enforce the limit for temporary snapshots. This is
		 * indicated by a NULL cred_t argument.
		 */
		if (cr == NULL)
			return (0);

		count_prop = DD_FIELD_SNAPSHOT_COUNT;
	} else {
		count_prop = DD_FIELD_FILESYSTEM_COUNT;
	}
	/*
	 * If we're allowed to change the limit, don't enforce the limit
	 * e.g. this can happen if a snapshot is taken by an administrative
	 * user in the global zone (i.e. a recursive snapshot by root).
	 * However, we must handle the case of delegated permissions where we
	 * are allowed to change the limit on the current dataset, but there
	 * is another limit in the tree above.
	 */
	enforce = dsl_enforce_ds_ss_limits(dd, prop, cr);
	if (enforce == ENFORCE_NEVER)
		return (0);

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment
	 * is 0.
	 */
	if (delta == 0)
		return (0);

	/*
	 * If an ancestor has been provided, stop checking the limit once we
	 * hit that dir. We need this during rename so that we don't overcount
	 * the check once we recurse up to the common ancestor.
	 */
	if (ancestor == dd)
		return (0);

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know there is no limit here (or above). The counts are
	 * not valid on this node and we know we won't touch this node's counts.
	 */
	if (!dsl_dir_is_zapified(dd))
		return (0);
	err = zap_lookup(os, dd->dd_object,
	    count_prop, sizeof (count), 1, &count);
	if (err == ENOENT)
		return (0);
	if (err != 0)
		return (err);

	err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
	    B_FALSE);
	if (err != 0)
		return (err);

	/* Is there a limit which we've hit? */
	if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
		return (SET_ERROR(EDQUOT));

	if (dd->dd_parent != NULL)
		err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
		    ancestor, cr);

	return (err);
}

/*
 * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
 * parents. When a new filesystem/snapshot is created, increment the count on
 * all parents, and when a filesystem/snapshot is destroyed, decrement the
 * count.
 */
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
    dmu_tx_t *tx)
{
	int err;
	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t count;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
	    strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);

	/*
	 * We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
	 */
	if (dd->dd_myname[0] == '$' && strcmp(prop,
	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
		return;
	}

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
	 */
	if (delta == 0)
		return;

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know the counts are not valid on this node and we
	 * know we shouldn't touch this node's counts. An uninitialized count
	 * on the node indicates that either the feature has not yet been
	 * activated or there are no limits on this part of the tree.
	 */
	if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
	    prop, sizeof (count), 1, &count)) == ENOENT)
		return;
	VERIFY0(err);

	count += delta;
	/* Use a signed verify to make sure we're not neg. */
	VERIFY3S(count, >=, 0);

	VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
	    tx));

	/* Roll up this additional count into our ancestors */
	if (dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}

uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds) {
		ddphys->dd_parent_obj = pds->dd_object;

		/* update the filesystem counts */
		dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
	}
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;

	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dsl_dir_phys(dd)->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}

uint64_t
dsl_dir_get_used(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_bytes);
}

uint64_t
dsl_dir_get_compressed(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_compressed_bytes);
}

uint64_t
dsl_dir_get_quota(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_quota);
}

uint64_t
dsl_dir_get_reservation(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_reserved);
}

uint64_t
dsl_dir_get_compressratio(dsl_dir_t *dd)
{
	/* a fixed point number, 100x the ratio */
	return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
	    (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
	    dsl_dir_phys(dd)->dd_compressed_bytes));
}

uint64_t
dsl_dir_get_logicalused(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
}

uint64_t
dsl_dir_get_usedsnap(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
}

uint64_t
dsl_dir_get_usedds(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
}

uint64_t
dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
}

uint64_t
dsl_dir_get_usedchild(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
	    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
}

void
dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
{
	dsl_dataset_t *ds;
	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
	    dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));

	dsl_dataset_name(ds, buf);

	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
{
	if (dsl_dir_is_zapified(dd)) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
		    sizeof (*count), 1, count));
	} else {
		return (SET_ERROR(ENOENT));
	}
}

int
dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
{
	if (dsl_dir_is_zapified(dd)) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
		    sizeof (*count), 1, count));
	} else {
		return (SET_ERROR(ENOENT));
	}
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
	    dsl_dir_get_quota(dd));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dsl_dir_get_reservation(dd));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
	    dsl_dir_get_logicalused(dd));
	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dsl_dir_get_usedsnap(dd));
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dsl_dir_get_usedds(dd));
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dsl_dir_get_usedrefreserv(dd));
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dsl_dir_get_usedchild(dd));
	}
	mutex_exit(&dd->dd_lock);

	uint64_t count;
	if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
		    count);
	}
	if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
		    count);
	}

	if (dsl_dir_is_clone(dd)) {
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		dsl_dir_get_origin(dd, buf);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}

}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dsl_dir_phys(dd));

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
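	/*
	 * Returns the change in the space charged to our parent when this
	 * dir's usage goes from "used" to "used + delta". The parent is
	 * always charged at least dd_reserved, so changes that stay entirely
	 * below the reservation produce no parent-visible delta.
	 */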
	uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
	uint64_t new_accounted =
	    MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
	return (new_accounted - old_accounted);
}

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
	    (u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (int i = 0; i < TXG_SIZE; i++)
		space += dd->dd_space_towrite[i & TXG_MASK];

	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it? If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dsl_dir_phys(dd)->dd_quota != 0)
		quota = dsl_dir_phys(dd)->dd_quota;
	used = dsl_dir_phys(dd)->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
		    ZFS_SPACE_CHECK_NORMAL);
		quota = MIN(quota, poolsize);
	}

	if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dsl_dir_phys(dd)->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

struct tempreserve {
	list_node_t	tr_node;
	dsl_dir_t	*tr_ds;
	uint64_t	tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg;
	uint64_t quota;
	struct tempreserve *tr;
	int retval;
	uint64_t ext_quota;
	uint64_t ref_rsrv;

top_of_function:
	txg = tx->tx_txg;
	retval = EDQUOT;
	ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota. We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	uint64_t est_inflight = dsl_dir_space_towrite(dd);
	for (int i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, !netfree,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error != 0) {
			mutex_exit(&dd->dd_lock);
			DMU_TX_STAT_BUMP(dmu_tx_quota);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0 ||
	    (tx->tx_objset && dmu_objset_type(tx->tx_objset) == DMU_OST_ZVOL &&
	    zvol_enforce_quotas == B_FALSE))
		quota = UINT64_MAX;
	else
		quota = dsl_dir_phys(dd)->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop. In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
		    (netfree) ?
		    ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);

		if (avail < quota) {
			quota = avail;
			retval = SET_ERROR(ENOSPC);
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes
	 * or deferred frees (which may free up space for us).
	 */
	ext_quota = quota >> 5;
	if (quota == UINT64_MAX)
		ext_quota = 0;

	if (used_on_disk >= quota) {
		if (retval == ENOSPC && (used_on_disk - quota) <
		    dsl_pool_deferred_space(dd->dd_pool)) {
			retval = SET_ERROR(ERESTART);
		}
		/* Quota exceeded */
		mutex_exit(&dd->dd_lock);
		DMU_TX_STAT_BUMP(dmu_tx_quota);
		return (retval);
	} else if (used_on_disk + est_inflight >= quota + ext_quota) {
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK\n",
		    (u_longlong_t)used_on_disk>>10,
		    (u_longlong_t)est_inflight>>10,
		    (u_longlong_t)quota>>10, (u_longlong_t)asize>>10);
		mutex_exit(&dd->dd_lock);
		DMU_TX_STAT_BUMP(dmu_tx_quota);
		return (SET_ERROR(ERESTART));
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txg & TXG_MASK] += asize;

	uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent != NULL && parent_rsrv != 0) {
		/*
		 * Recurse on our parent without recursion. This has been
		 * observed to be potentially large stack usage even within
		 * the test suite. Largest seen stack was 7632 bytes on linux.
		 */

		dd = dd->dd_parent;
		asize = parent_rsrv;
		ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
		first = B_FALSE;
		goto top_of_function;
	}

	return (0);
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);

	err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);
	} else {
		if (err == EAGAIN) {
			/*
			 * If arc_memory_throttle() detected that pageout
			 * is running and we are low on memory, we delay new
			 * non-pageout transactions to give pageout an
			 * advantage.
			 *
			 * It is unfortunate to be delaying while the caller's
			 * locks are held.
			 */
			txg_delay(dd->dd_pool, tx->tx_txg,
			    MSEC2NSEC(10), MSEC2NSEC(10));
			err = SET_ERROR(ERESTART);
		}

		ASSERT3U(err, ==, ERESTART);
	}

	if (err == 0) {
		err = dsl_dir_tempreserve_impl(dd, asize, netfree,
		    B_FALSE, tr_list, tx, B_TRUE);
	}

	if (err != 0)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while ((tr = list_remove_head(tr_list)) != NULL) {
		if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

/*
 * This should be called from open context when we think we're going to write
 * or free space, for example when dirtying data. Be conservative; it's okay
 * to write less space or free more, but we don't want to write more or free
 * less than the amount specified.
 *
 * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
 * version; however, it has been adjusted to use an iterative rather than
 * recursive algorithm to minimize stack usage.
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	do {
		mutex_enter(&dd->dd_lock);
		if (space > 0)
			dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

		est_used = dsl_dir_space_towrite(dd) +
		    dsl_dir_phys(dd)->dd_used_bytes;
		parent_space = parent_delta(dd, est_used, space);
		mutex_exit(&dd->dd_lock);

		/* Make sure that we clean up dd_space_to* */
		dsl_dir_dirty(dd, tx);

		dd = dd->dd_parent;
		space = parent_space;
	} while (space && dd);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/*
	 * dsl_dataset_set_refreservation_sync_impl() calls this with
	 * dd_lock held, so that it can atomically update
	 * ds->ds_reserved and the dsl_dir accounting, so that
	 * dsl_dataset_check_quota() can see dataset and dir accounting
	 * consistently.
	 */
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
	if (needlock)
		mutex_enter(&dd->dd_lock);
	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
	accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
	ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    ddp->dd_uncompressed_bytes >= -uncompressed);
	ddp->dd_used_bytes += used;
	ddp->dd_uncompressed_bytes += uncompressed;
	ddp->dd_compressed_bytes += compressed;

	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
		ddp->dd_used_breakdown[type] += used;
#ifdef ZFS_DEBUG
		{
			dd_used_t t;
			uint64_t u = 0;
			for (t = 0; t < DD_USED_NUM; t++)
				u += ddp->dd_used_breakdown[t];
			ASSERT3U(u, ==, ddp->dd_used_bytes);
		}
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_transfer_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed,
		    used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
	if (delta == 0 ||
	    !(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    ddp->dd_used_breakdown[oldtype] >= delta :
	    ddp->dd_used_breakdown[newtype] >= -delta);
	ASSERT(ddp->dd_used_bytes >= ABS(delta));
	ddp->dd_used_breakdown[oldtype] -= delta;
	ddp->dd_used_breakdown[newtype] += delta;
	mutex_exit(&dd->dd_lock);
}

void
dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
    int64_t compressed, int64_t uncompressed, int64_t tonew,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
	accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
	ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    ddp->dd_uncompressed_bytes >= -uncompressed);
	ddp->dd_used_bytes += used;
	ddp->dd_uncompressed_bytes += uncompressed;
	ddp->dd_compressed_bytes += compressed;

	if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(tonew - used <= 0 ||
		    ddp->dd_used_breakdown[oldtype] >= tonew - used);
		ASSERT(tonew >= 0 ||
		    ddp->dd_used_breakdown[newtype] >= -tonew);
		ddp->dd_used_breakdown[oldtype] -= tonew - used;
		ddp->dd_used_breakdown[newtype] += tonew;
#ifdef ZFS_DEBUG
		{
			dd_used_t t;
			uint64_t u = 0;
			for (t = 0; t < DD_USED_NUM; t++)
				u += ddp->dd_used_breakdown[t];
			ASSERT3U(u, ==, ddp->dd_used_bytes);
		}
#endif
	}
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_transfer_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed,
		    used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;

static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_prop_predict(ds->ds_dir, "quota",
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_dir->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(ds->ds_dir);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
	    newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
		error = SET_ERROR(ENOSPC);
	}
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
	}

	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = quota;

	return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	uint64_t newval, used, avail;
	int error;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);
	dd = ds->ds_dir;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_RESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool,
		    ZFS_SPACE_CHECK_NORMAL) - used;
	}

	if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
		uint64_t delta = MAX(used, newval) -
		    MAX(used, dsl_dir_phys(dd)->dd_reserved);

		if (delta > avail ||
		    (dsl_dir_phys(dd)->dd_quota > 0 &&
		    newval > dsl_dir_phys(dd)->dd_quota))
			error = SET_ERROR(ENOSPC);
	}

	dsl_dataset_rele(ds, FTAG);
	return (error);
}

void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
	dsl_dir_phys(dd)->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}

static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    (longlong_t)newval);
	}

	dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = reservation;

	return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, &ddsqra, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED));
}

static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor? Syncing context only.
1897 */ 1898 static int64_t 1899 would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor) 1900 { 1901 if (dd == ancestor) 1902 return (delta); 1903 1904 mutex_enter(&dd->dd_lock); 1905 delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta); 1906 mutex_exit(&dd->dd_lock); 1907 return (would_change(dd->dd_parent, delta, ancestor)); 1908 } 1909 1910 typedef struct dsl_dir_rename_arg { 1911 const char *ddra_oldname; 1912 const char *ddra_newname; 1913 cred_t *ddra_cred; 1914 } dsl_dir_rename_arg_t; 1915 1916 typedef struct dsl_valid_rename_arg { 1917 int char_delta; 1918 int nest_delta; 1919 } dsl_valid_rename_arg_t; 1920 1921 static int 1922 dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg) 1923 { 1924 (void) dp; 1925 dsl_valid_rename_arg_t *dvra = arg; 1926 char namebuf[ZFS_MAX_DATASET_NAME_LEN]; 1927 1928 dsl_dataset_name(ds, namebuf); 1929 1930 ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN), 1931 <, ZFS_MAX_DATASET_NAME_LEN); 1932 int namelen = strlen(namebuf) + dvra->char_delta; 1933 int depth = get_dataset_depth(namebuf) + dvra->nest_delta; 1934 1935 if (namelen >= ZFS_MAX_DATASET_NAME_LEN) 1936 return (SET_ERROR(ENAMETOOLONG)); 1937 if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting) 1938 return (SET_ERROR(ENAMETOOLONG)); 1939 return (0); 1940 } 1941 1942 static int 1943 dsl_dir_rename_check(void *arg, dmu_tx_t *tx) 1944 { 1945 dsl_dir_rename_arg_t *ddra = arg; 1946 dsl_pool_t *dp = dmu_tx_pool(tx); 1947 dsl_dir_t *dd, *newparent; 1948 dsl_valid_rename_arg_t dvra; 1949 dsl_dataset_t *parentds; 1950 objset_t *parentos; 1951 const char *mynewname; 1952 int error; 1953 1954 /* target dir should exist */ 1955 error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL); 1956 if (error != 0) 1957 return (error); 1958 1959 /* new parent should exist */ 1960 error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG, 1961 &newparent, &mynewname); 1962 if (error != 0) { 1963 dsl_dir_rele(dd, FTAG); 1964 return (error); 1965 } 1966 1967 /* can't rename to different pool */ 1968 if (dd->dd_pool != newparent->dd_pool) { 1969 dsl_dir_rele(newparent, FTAG); 1970 dsl_dir_rele(dd, FTAG); 1971 return (SET_ERROR(EXDEV)); 1972 } 1973 1974 /* new name should not already exist */ 1975 if (mynewname == NULL) { 1976 dsl_dir_rele(newparent, FTAG); 1977 dsl_dir_rele(dd, FTAG); 1978 return (SET_ERROR(EEXIST)); 1979 } 1980 1981 /* can't rename below anything but filesystems (eg. 
no ZVOLs) */ 1982 error = dsl_dataset_hold_obj(newparent->dd_pool, 1983 dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds); 1984 if (error != 0) { 1985 dsl_dir_rele(newparent, FTAG); 1986 dsl_dir_rele(dd, FTAG); 1987 return (error); 1988 } 1989 error = dmu_objset_from_ds(parentds, &parentos); 1990 if (error != 0) { 1991 dsl_dataset_rele(parentds, FTAG); 1992 dsl_dir_rele(newparent, FTAG); 1993 dsl_dir_rele(dd, FTAG); 1994 return (error); 1995 } 1996 if (dmu_objset_type(parentos) != DMU_OST_ZFS) { 1997 dsl_dataset_rele(parentds, FTAG); 1998 dsl_dir_rele(newparent, FTAG); 1999 dsl_dir_rele(dd, FTAG); 2000 return (SET_ERROR(ZFS_ERR_WRONG_PARENT)); 2001 } 2002 dsl_dataset_rele(parentds, FTAG); 2003 2004 ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN), 2005 <, ZFS_MAX_DATASET_NAME_LEN); 2006 ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN), 2007 <, ZFS_MAX_DATASET_NAME_LEN); 2008 dvra.char_delta = strlen(ddra->ddra_newname) 2009 - strlen(ddra->ddra_oldname); 2010 dvra.nest_delta = get_dataset_depth(ddra->ddra_newname) 2011 - get_dataset_depth(ddra->ddra_oldname); 2012 2013 /* if the name length is growing, validate child name lengths */ 2014 if (dvra.char_delta > 0 || dvra.nest_delta > 0) { 2015 error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename, 2016 &dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 2017 if (error != 0) { 2018 dsl_dir_rele(newparent, FTAG); 2019 dsl_dir_rele(dd, FTAG); 2020 return (error); 2021 } 2022 } 2023 2024 if (dmu_tx_is_syncing(tx)) { 2025 if (spa_feature_is_active(dp->dp_spa, 2026 SPA_FEATURE_FS_SS_LIMIT)) { 2027 /* 2028 * Although this is the check function and we don't 2029 * normally make on-disk changes in check functions, 2030 * we need to do that here. 2031 * 2032 * Ensure this portion of the tree's counts have been 2033 * initialized in case the new parent has limits set. 2034 */ 2035 dsl_dir_init_fs_ss_count(dd, tx); 2036 } 2037 } 2038 2039 if (newparent != dd->dd_parent) { 2040 /* is there enough space? 
*/ 2041 uint64_t myspace = 2042 MAX(dsl_dir_phys(dd)->dd_used_bytes, 2043 dsl_dir_phys(dd)->dd_reserved); 2044 objset_t *os = dd->dd_pool->dp_meta_objset; 2045 uint64_t fs_cnt = 0; 2046 uint64_t ss_cnt = 0; 2047 2048 if (dsl_dir_is_zapified(dd)) { 2049 int err; 2050 2051 err = zap_lookup(os, dd->dd_object, 2052 DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1, 2053 &fs_cnt); 2054 if (err != ENOENT && err != 0) { 2055 dsl_dir_rele(newparent, FTAG); 2056 dsl_dir_rele(dd, FTAG); 2057 return (err); 2058 } 2059 2060 /* 2061 * have to add 1 for the filesystem itself that we're 2062 * moving 2063 */ 2064 fs_cnt++; 2065 2066 err = zap_lookup(os, dd->dd_object, 2067 DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1, 2068 &ss_cnt); 2069 if (err != ENOENT && err != 0) { 2070 dsl_dir_rele(newparent, FTAG); 2071 dsl_dir_rele(dd, FTAG); 2072 return (err); 2073 } 2074 } 2075 2076 /* check for encryption errors */ 2077 error = dsl_dir_rename_crypt_check(dd, newparent); 2078 if (error != 0) { 2079 dsl_dir_rele(newparent, FTAG); 2080 dsl_dir_rele(dd, FTAG); 2081 return (SET_ERROR(EACCES)); 2082 } 2083 2084 /* no rename into our descendant */ 2085 if (closest_common_ancestor(dd, newparent) == dd) { 2086 dsl_dir_rele(newparent, FTAG); 2087 dsl_dir_rele(dd, FTAG); 2088 return (SET_ERROR(EINVAL)); 2089 } 2090 2091 error = dsl_dir_transfer_possible(dd->dd_parent, 2092 newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred); 2093 if (error != 0) { 2094 dsl_dir_rele(newparent, FTAG); 2095 dsl_dir_rele(dd, FTAG); 2096 return (error); 2097 } 2098 } 2099 2100 dsl_dir_rele(newparent, FTAG); 2101 dsl_dir_rele(dd, FTAG); 2102 return (0); 2103 } 2104 2105 static void 2106 dsl_dir_rename_sync(void *arg, dmu_tx_t *tx) 2107 { 2108 dsl_dir_rename_arg_t *ddra = arg; 2109 dsl_pool_t *dp = dmu_tx_pool(tx); 2110 dsl_dir_t *dd, *newparent; 2111 const char *mynewname; 2112 objset_t *mos = dp->dp_meta_objset; 2113 2114 VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL)); 2115 VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent, 2116 &mynewname)); 2117 2118 ASSERT3P(mynewname, !=, NULL); 2119 2120 /* Log this before we change the name. */ 2121 spa_history_log_internal_dd(dd, "rename", tx, 2122 "-> %s", ddra->ddra_newname); 2123 2124 if (newparent != dd->dd_parent) { 2125 objset_t *os = dd->dd_pool->dp_meta_objset; 2126 uint64_t fs_cnt = 0; 2127 uint64_t ss_cnt = 0; 2128 2129 /* 2130 * We already made sure the dd counts were initialized in the 2131 * check function. 
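		 *
		 * Illustrative example (hypothetical figures): moving a
		 * filesystem that itself contains two child filesystems and
		 * five snapshots adjusts each initialized ancestor's
		 * filesystem count by three (the two children plus the moved
		 * filesystem itself, hence the fs_cnt++ below) and its
		 * snapshot count by five, subtracting along the old parent's
		 * ancestry and adding along the new parent's.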
2132 */ 2133 if (spa_feature_is_active(dp->dp_spa, 2134 SPA_FEATURE_FS_SS_LIMIT)) { 2135 VERIFY0(zap_lookup(os, dd->dd_object, 2136 DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1, 2137 &fs_cnt)); 2138 /* add 1 for the filesystem itself that we're moving */ 2139 fs_cnt++; 2140 2141 VERIFY0(zap_lookup(os, dd->dd_object, 2142 DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1, 2143 &ss_cnt)); 2144 } 2145 2146 dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt, 2147 DD_FIELD_FILESYSTEM_COUNT, tx); 2148 dsl_fs_ss_count_adjust(newparent, fs_cnt, 2149 DD_FIELD_FILESYSTEM_COUNT, tx); 2150 2151 dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt, 2152 DD_FIELD_SNAPSHOT_COUNT, tx); 2153 dsl_fs_ss_count_adjust(newparent, ss_cnt, 2154 DD_FIELD_SNAPSHOT_COUNT, tx); 2155 2156 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD, 2157 -dsl_dir_phys(dd)->dd_used_bytes, 2158 -dsl_dir_phys(dd)->dd_compressed_bytes, 2159 -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx); 2160 dsl_dir_diduse_space(newparent, DD_USED_CHILD, 2161 dsl_dir_phys(dd)->dd_used_bytes, 2162 dsl_dir_phys(dd)->dd_compressed_bytes, 2163 dsl_dir_phys(dd)->dd_uncompressed_bytes, tx); 2164 2165 if (dsl_dir_phys(dd)->dd_reserved > 2166 dsl_dir_phys(dd)->dd_used_bytes) { 2167 uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved - 2168 dsl_dir_phys(dd)->dd_used_bytes; 2169 2170 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV, 2171 -unused_rsrv, 0, 0, tx); 2172 dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV, 2173 unused_rsrv, 0, 0, tx); 2174 } 2175 } 2176 2177 dmu_buf_will_dirty(dd->dd_dbuf, tx); 2178 2179 /* remove from old parent zapobj */ 2180 VERIFY0(zap_remove(mos, 2181 dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj, 2182 dd->dd_myname, tx)); 2183 2184 (void) strlcpy(dd->dd_myname, mynewname, 2185 sizeof (dd->dd_myname)); 2186 dsl_dir_rele(dd->dd_parent, dd); 2187 dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object; 2188 VERIFY0(dsl_dir_hold_obj(dp, 2189 newparent->dd_object, NULL, dd, &dd->dd_parent)); 2190 2191 /* add to new parent zapobj */ 2192 VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj, 2193 dd->dd_myname, 8, 1, &dd->dd_object, tx)); 2194 2195 /* TODO: A rename callback to avoid these layering violations. 
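	 * For example, renaming pool/data to pool/archive (hypothetical
	 * names) must reach from the DSL layer into the ZPL and zvol code so
	 * that mounted filesystems' recorded names and any zvol minor nodes
	 * under the old prefix are updated as well.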
*/ 2196 zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname); 2197 zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname, 2198 ddra->ddra_newname, B_TRUE); 2199 2200 dsl_prop_notify_all(dd); 2201 2202 dsl_dir_rele(newparent, FTAG); 2203 dsl_dir_rele(dd, FTAG); 2204 } 2205 2206 int 2207 dsl_dir_rename(const char *oldname, const char *newname) 2208 { 2209 cred_t *cr = CRED(); 2210 crhold(cr); 2211 2212 dsl_dir_rename_arg_t ddra; 2213 2214 ddra.ddra_oldname = oldname; 2215 ddra.ddra_newname = newname; 2216 ddra.ddra_cred = cr; 2217 2218 int err = dsl_sync_task(oldname, 2219 dsl_dir_rename_check, dsl_dir_rename_sync, &ddra, 2220 3, ZFS_SPACE_CHECK_RESERVED); 2221 2222 crfree(cr); 2223 return (err); 2224 } 2225 2226 int 2227 dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, 2228 uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, 2229 cred_t *cr) 2230 { 2231 dsl_dir_t *ancestor; 2232 int64_t adelta; 2233 uint64_t avail; 2234 int err; 2235 2236 ancestor = closest_common_ancestor(sdd, tdd); 2237 adelta = would_change(sdd, -space, ancestor); 2238 avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE); 2239 if (avail < space) 2240 return (SET_ERROR(ENOSPC)); 2241 2242 err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT, 2243 ancestor, cr); 2244 if (err != 0) 2245 return (err); 2246 err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT, 2247 ancestor, cr); 2248 if (err != 0) 2249 return (err); 2250 2251 return (0); 2252 } 2253 2254 inode_timespec_t 2255 dsl_dir_snap_cmtime(dsl_dir_t *dd) 2256 { 2257 inode_timespec_t t; 2258 2259 mutex_enter(&dd->dd_lock); 2260 t = dd->dd_snap_cmtime; 2261 mutex_exit(&dd->dd_lock); 2262 2263 return (t); 2264 } 2265 2266 void 2267 dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx) 2268 { 2269 dsl_pool_t *dp = dmu_tx_pool(tx); 2270 inode_timespec_t t; 2271 gethrestime(&t); 2272 2273 mutex_enter(&dd->dd_lock); 2274 dd->dd_snap_cmtime = t; 2275 if (spa_feature_is_enabled(dp->dp_spa, 2276 SPA_FEATURE_EXTENSIBLE_DATASET)) { 2277 objset_t *mos = dd->dd_pool->dp_meta_objset; 2278 uint64_t ddobj = dd->dd_object; 2279 dsl_dir_zapify(dd, tx); 2280 VERIFY0(zap_update(mos, ddobj, 2281 DD_FIELD_SNAPSHOTS_CHANGED, 2282 sizeof (uint64_t), 2283 sizeof (inode_timespec_t) / sizeof (uint64_t), 2284 &t, tx)); 2285 } 2286 mutex_exit(&dd->dd_lock); 2287 } 2288 2289 void 2290 dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx) 2291 { 2292 objset_t *mos = dd->dd_pool->dp_meta_objset; 2293 dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx); 2294 } 2295 2296 boolean_t 2297 dsl_dir_is_zapified(dsl_dir_t *dd) 2298 { 2299 dmu_object_info_t doi; 2300 2301 dmu_object_info_from_db(dd->dd_dbuf, &doi); 2302 return (doi.doi_type == DMU_OTN_ZAP_METADATA); 2303 } 2304 2305 int 2306 dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj) 2307 { 2308 objset_t *mos = dd->dd_pool->dp_meta_objset; 2309 ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa, 2310 SPA_FEATURE_LIVELIST)); 2311 int err = dsl_deadlist_open(&dd->dd_livelist, mos, obj); 2312 if (err != 0) 2313 return (err); 2314 bplist_create(&dd->dd_pending_allocs); 2315 bplist_create(&dd->dd_pending_frees); 2316 return (0); 2317 } 2318 2319 void 2320 dsl_dir_livelist_close(dsl_dir_t *dd) 2321 { 2322 dsl_deadlist_close(&dd->dd_livelist); 2323 bplist_destroy(&dd->dd_pending_allocs); 2324 bplist_destroy(&dd->dd_pending_frees); 2325 } 2326 2327 void 2328 dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total) 2329 { 2330 uint64_t obj; 2331 dsl_pool_t *dp = dmu_tx_pool(tx); 2332 spa_t *spa = 
dp->dp_spa; 2333 livelist_condense_entry_t to_condense = spa->spa_to_condense; 2334 2335 if (!dsl_deadlist_is_open(&dd->dd_livelist)) 2336 return; 2337 2338 /* 2339 * If the livelist being removed is set to be condensed, stop the 2340 * condense zthr and indicate the cancellation in the spa_to_condense 2341 * struct in case the condense no-wait synctask has already started 2342 */ 2343 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr; 2344 if (ll_condense_thread != NULL && 2345 (to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) { 2346 /* 2347 * We use zthr_wait_cycle_done instead of zthr_cancel 2348 * because we don't want to destroy the zthr, just have 2349 * it skip its current task. 2350 */ 2351 spa->spa_to_condense.cancelled = B_TRUE; 2352 zthr_wait_cycle_done(ll_condense_thread); 2353 /* 2354 * If we've returned from zthr_wait_cycle_done without 2355 * clearing the to_condense data structure it's either 2356 * because the no-wait synctask has started (which is 2357 * indicated by 'syncing' field of to_condense) and we 2358 * can expect it to clear to_condense on its own. 2359 * Otherwise, we returned before the zthr ran. The 2360 * checkfunc will now fail as cancelled == B_TRUE so we 2361 * can safely NULL out ds, allowing a different dir's 2362 * livelist to be condensed. 2363 * 2364 * We can be sure that the to_condense struct will not 2365 * be repopulated at this stage because both this 2366 * function and dsl_livelist_try_condense execute in 2367 * syncing context. 2368 */ 2369 if ((spa->spa_to_condense.ds != NULL) && 2370 !spa->spa_to_condense.syncing) { 2371 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, 2372 spa); 2373 spa->spa_to_condense.ds = NULL; 2374 } 2375 } 2376 2377 dsl_dir_livelist_close(dd); 2378 VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object, 2379 DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj)); 2380 VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object, 2381 DD_FIELD_LIVELIST, tx)); 2382 if (total) { 2383 dsl_deadlist_free(dp->dp_meta_objset, obj, tx); 2384 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx); 2385 } 2386 } 2387 2388 static int 2389 dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds, 2390 zfs_wait_activity_t activity, boolean_t *in_progress) 2391 { 2392 int error = 0; 2393 2394 ASSERT(MUTEX_HELD(&dd->dd_activity_lock)); 2395 2396 switch (activity) { 2397 case ZFS_WAIT_DELETEQ: { 2398 #ifdef _KERNEL 2399 objset_t *os; 2400 error = dmu_objset_from_ds(ds, &os); 2401 if (error != 0) 2402 break; 2403 2404 mutex_enter(&os->os_user_ptr_lock); 2405 void *user = dmu_objset_get_user(os); 2406 mutex_exit(&os->os_user_ptr_lock); 2407 if (dmu_objset_type(os) != DMU_OST_ZFS || 2408 user == NULL || zfs_get_vfs_flag_unmounted(os)) { 2409 *in_progress = B_FALSE; 2410 return (0); 2411 } 2412 2413 uint64_t readonly = B_FALSE; 2414 error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly, 2415 NULL); 2416 2417 if (error != 0) 2418 break; 2419 2420 if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) { 2421 *in_progress = B_FALSE; 2422 return (0); 2423 } 2424 2425 uint64_t count, unlinked_obj; 2426 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1, 2427 &unlinked_obj); 2428 if (error != 0) { 2429 dsl_dataset_rele(ds, FTAG); 2430 break; 2431 } 2432 error = zap_count(os, unlinked_obj, &count); 2433 2434 if (error == 0) 2435 *in_progress = (count != 0); 2436 break; 2437 #else 2438 /* 2439 * The delete queue is ZPL specific, and libzpool doesn't have 2440 * it. It doesn't make sense to wait for it. 
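		 *
		 * For example, a caller waiting on ZFS_WAIT_DELETEQ from a
		 * libzpool-based consumer (e.g. ztest) simply sees
		 * *in_progress = B_FALSE and returns immediately, whereas the
		 * kernel branch above counts the entries in the
		 * ZFS_UNLINKED_SET ZAP object to decide whether to keep
		 * waiting.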
2441 		 */
2442 		(void) ds;
2443 		*in_progress = B_FALSE;
2444 		break;
2445 #endif
2446 	}
2447 	default:
2448 		panic("unrecognized value for activity %d", activity);
2449 	}
2450 
2451 	return (error);
2452 }
2453 
2454 int
2455 dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
2456     boolean_t *waited)
2457 {
2458 	int error = 0;
2459 	boolean_t in_progress;
2460 	dsl_pool_t *dp = dd->dd_pool;
2461 	for (;;) {
2462 		dsl_pool_config_enter(dp, FTAG);
2463 		error = dsl_dir_activity_in_progress(dd, ds, activity,
2464 		    &in_progress);
2465 		dsl_pool_config_exit(dp, FTAG);
2466 		if (error != 0 || !in_progress)
2467 			break;
2468 
2469 		*waited = B_TRUE;
2470 
2471 		if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
2472 		    0 || dd->dd_activity_cancelled) {
2473 			error = SET_ERROR(EINTR);
2474 			break;
2475 		}
2476 	}
2477 	return (error);
2478 }
2479 
2480 void
2481 dsl_dir_cancel_waiters(dsl_dir_t *dd)
2482 {
2483 	mutex_enter(&dd->dd_activity_lock);
2484 	dd->dd_activity_cancelled = B_TRUE;
2485 	cv_broadcast(&dd->dd_activity_cv);
2486 	while (dd->dd_activity_waiters > 0)
2487 		cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
2488 	mutex_exit(&dd->dd_activity_lock);
2489 }
2490 
2491 #if defined(_KERNEL)
2492 EXPORT_SYMBOL(dsl_dir_set_quota);
2493 EXPORT_SYMBOL(dsl_dir_set_reservation);
2494 #endif
2495 
2496 ZFS_MODULE_PARAM(zfs, , zvol_enforce_quotas, INT, ZMOD_RW,
2497 	"Enable strict ZVOL quota enforcement");
2498 
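
/*
 * Illustrative usage sketch (not part of the module): callers such as the
 * zfs ioctl property-set path apply a quota or reservation by dataset name
 * and let dsl_sync_task() drive the check/sync pairs defined above. A
 * hypothetical helper might look like:
 *
 *	int
 *	example_set_limits(const char *dsname)
 *	{
 *		int err;
 *
 *		// 10 GiB quota, supplied as a locally-set property value
 *		err = dsl_dir_set_quota(dsname, ZPROP_SRC_LOCAL,
 *		    10ULL << 30);
 *		if (err != 0)
 *			return (err);
 *
 *		// 1 GiB reservation; a value of 0 would clear it again
 *		return (dsl_dir_set_reservation(dsname, ZPROP_SRC_LOCAL,
 *		    1ULL << 30));
 *	}
 *
 * The check functions run first (in open and then syncing context) and
 * return ENOSPC if the new quota would fall below the current usage or
 * reservation, or if the new reservation cannot be covered by the parent.
 */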