/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx);


/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	ASSERT3U(dd->dd_used_bytes, ==, dd->dd_phys->dd_used_bytes);

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should be empty since they hold the
	 * dir open.
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
	}
#endif
	/* XXX assert bonus buffer size is correct */
	if (dd == NULL) {
		dsl_dir_t *winner;
		int err;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		dd->dd_used_bytes = dd->dd_phys->dd_used_bytes;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err) {
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err) {
				dsl_dir_close(dd->dd_parent, dd);
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);
}

void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

int
dsl_dir_is_private(dsl_dir_t *dd)
{
	int rv = FALSE;

	if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent))
		rv = TRUE;
	if (dataset_name_hidden(dd->dd_myname))
		rv = TRUE;
	return (rv);
}

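/*
 * Copy the next path component of "path" into "component" (which must
 * be at least MAXNAMELEN bytes) and point *nextp at the remainder of
 * the path, or at NULL if this was the last component.  When the next
 * separator is an '@', *nextp is left pointing at the '@' so the
 * caller can tell that what follows names a snapshot.  Returns EINVAL
 * for doubled or misplaced separators, and ENAMETOOLONG if the
 * component won't fit.
 */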
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if (path == NULL)
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there had better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}

/*
 * Same as dsl_dir_open, but ignore the first component of name and use
 * the spa instead.
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  The call fails (ENOENT) if the path is bogus, or
 * if tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}

uint64_t
dsl_dir_create_sync(dsl_dir_t *pds, const char *name, dmu_tx_t *tx)
{
	objset_t *mos = pds->dd_pool->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *dsphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (uint64_t), 1, &ddobj, tx));
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;

	dsphys->dd_creation_time = gethrestime_sec();
	dsphys->dd_parent_obj = pds->dd_object;
	dsphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val, obj;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	val = 0;
	dsl_dir_set_reservation_sync(dd, &val, cr, tx);
	ASSERT3U(dd->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

void
dsl_dir_create_root(objset_t *mos, uint64_t *ddobjp, dmu_tx_t *tx)
{
	dsl_dir_phys_t *dsp;
	dmu_buf_t *dbuf;
	int error;

	*ddobjp = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);

	error = zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET,
	    sizeof (uint64_t), 1, ddobjp, tx);
	ASSERT3U(error, ==, 0);

	VERIFY(0 == dmu_bonus_hold(mos, *ddobjp, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsp = dbuf->db_data;

	dsp->dd_creation_time = gethrestime_sec();
	dsp->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsp->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);

	dmu_buf_rele(dbuf, FTAG);
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, dd->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dd->dd_phys->dd_origin_obj) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj,
		    NULL, DS_MODE_NONE, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

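/*
 * A dir is charged to its parent for at least dd_reserved bytes, even
 * if it actually uses less.  Given a change of "delta" bytes against
 * "used", return how much the space accounted by the parent actually
 * changes once that reservation floor is applied.
 */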
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	dd->dd_phys->dd_used_bytes = dd->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

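/*
 * Sum of the space that open-context callers have announced via
 * dsl_dir_willuse_space() but that hasn't yet been synced out, across
 * all open txgs.  Caller must hold dd_lock.
 */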
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);
	if (dd == ancestor)
		used += delta;

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;

		/*
		 * While it's OK to be a little over quota, if
		 * we think we are using more space than there
		 * is in the pool (which is already 1.6% more than
		 * dsl_pool_adjustedsize()), something is very
		 * wrong.
		 */
		ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

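/*
 * Reserve "asize" bytes against this dir for the current txg, then
 * recurse upward to reserve whatever portion of that (as computed by
 * parent_delta()) isn't already covered by our own reservation.
 * Failure modes: EDQUOT when a quota would be exceeded, ENOSPC when
 * the pool itself is too full, and ERESTART when pending changes may
 * free enough space for a retry to succeed.
 */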
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	struct tempreserve *tr;
	int enospc = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values.  Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		if (poolsize < quota) {
			quota = poolsize;
			enospc = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight > quota) {
		if (est_inflight > 0 || used_on_disk < quota)
			enospc = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, enospc);
		mutex_exit(&dd->dd_lock);
		return (enospc);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}

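/*
 * A successful reservation is represented as a list of tempreserve
 * entries: one for the ARC throttle (neither tr_dp nor tr_ds set), one
 * for the pool (tr_dp set), and one per dsl_dir charged by
 * dsl_dir_tempreserve_impl() above (tr_ds set).
 * dsl_dir_tempreserve_clear() tells the three apart by which field is
 * set.
 */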
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg, 1);
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

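/*
 * Charge "space" against this dir's dd_space_towrite for the open txg,
 * then push the parent's share of the change (again filtered through
 * parent_delta()) up the ancestor chain.
 */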
static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}

/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}

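/*
 * Unlike the open-context estimates above, the deltas applied below are
 * exact; only the "used" delta is filtered through parent_delta(), so
 * the compressed/uncompressed byte counts roll up to the ancestors
 * unmodified.
 */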
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_dirty(dd, tx);

	mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed, tx);
	}
}

static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;
	int err = 0;
	uint64_t towrite;

	if (new_quota == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (new_quota < dd->dd_phys->dd_reserved ||
	    new_quota < dd->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

/* ARGSUSED */
static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = new_quota;
	mutex_exit(&dd->dd_lock);

	spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu ",
	    (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_quota(const char *ddname, uint64_t quota)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);

	if (quota != dd->dd_phys->dd_quota) {
		/*
		 * If someone removes a file, then tries to set the quota, we
		 * want to make sure the file freeing takes effect.
		 */
		txg_wait_open(dd->dd_pool, 0);

		err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
		    dsl_dir_set_quota_sync, dd, &quota, 0);
	}
	dsl_dir_close(dd, FTAG);
	return (err);
}

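/*
 * Raising a reservation only needs to be checked against available
 * space when it would increase what we charge to our parent
 * (delta > 0); lowering one always succeeds, modulo the EOVERFLOW
 * check below.
 */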
int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used, avail;
	int64_t delta;

	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (delta > 0 && delta > avail)
		return (ENOSPC);
	if (delta > 0 && dd->dd_phys->dd_quota > 0 &&
	    new_reservation > dd->dd_phys->dd_quota)
		return (ENOSPC);
	return (0);
}

/* ARGSUSED */
static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = new_reservation;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, delta, 0, 0, tx);
	}

	spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu",
	    (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, dd, &reservation, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}

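/*
 * Walk ds1's ancestor chain (starting with ds1 itself) and return the
 * first dir that also appears somewhere in ds2's chain.  Two dirs in
 * the same pool always share at least the root dir as an ancestor.
 */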
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};

/*ARGSUSED*/
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/* There should be 2 references: the open and the dirty */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		dsl_dir_diduse_space(dd->dd_parent, -myspace,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, myspace,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

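/*
 * Could "space" bytes be moved from sdd into tdd without running out of
 * space?  The move's freeing effect on the source side is modeled as a
 * delta (computed by would_change()) applied at the closest common
 * ancestor when evaluating tdd's available space.
 */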
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}