/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include "zfs_namecheck.h"

static uint64_t dsl_dir_estimated_space(dsl_dir_t *dd);
static uint64_t dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx);


/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	ASSERT3U(dd->dd_used_bytes, ==, dd->dd_phys->dd_used_bytes);

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should be empty since they hold the
	 * dir open.
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
	}
#endif
	/* XXX assert bonus buffer size is correct */
	if (dd == NULL) {
		dsl_dir_t *winner;
		int err;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		dd->dd_used_bytes = dd->dd_phys->dd_used_bytes;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err) {
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->
				    dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->
				    dd_child_dir_zapobj,
				    ddobj, dd->dd_myname);
			}
			if (err) {
				dsl_dir_close(dd->dd_parent, dd);
				mutex_destroy(&dd->dd_lock);
				kmem_free(dd, sizeof (dsl_dir_t));
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa. We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool. We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);
}
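/*
 * Illustrative sketch (not compiled): with dp_config_rwlock held, every
 * successful dsl_dir_open_obj() must eventually be balanced by a
 * dsl_dir_close() with the same tag, which drops both the bonus-buffer
 * hold and the open-to-close spa hold taken above:
 *
 *	dsl_dir_t *dd;
 *	if (dsl_dir_open_obj(dp, ddobj, NULL, FTAG, &dd) == 0) {
 *		... use dd ...
 *		dsl_dir_close(dd, FTAG);
 *	}
 */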
void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

int
dsl_dir_is_private(dsl_dir_t *dd)
{
	int rv = FALSE;

	if (dd->dd_parent && dsl_dir_is_private(dd->dd_parent))
		rv = TRUE;
	if (dataset_name_hidden(dd->dd_myname))
		rv = TRUE;
	return (rv);
}


static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;
	if (path == NULL)
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there had better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}
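/*
 * Worked examples for getcomponent() (hypothetical inputs):
 *
 *	path			component	*nextp
 *	"tank/home/user"	"tank"		"home/user"
 *	"home@snap"		"home"		"@snap"
 *	"@snap"			"@snap"		NULL
 *	"tank//home"		-		EINVAL (two separators)
 *	"a@b/c"			-		EINVAL (slash after @)
 */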
/*
 * Same as dsl_dir_open(); if a spa is supplied, ignore the first
 * component of name (the pool name) and use the given spa instead of
 * opening it by name.
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t in *ddp, and in *tailp any final component that
 * couldn't be found. Return an error if the path is bogus, or if
 * tailp==NULL and we couldn't parse the whole name. (*tailp)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}
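/*
 * Usage sketch for dsl_dir_open() (hypothetical names): opening
 * "tank/home@yesterday" yields the dsl_dir_t for "tank/home" with
 * *tail pointing at "@yesterday":
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *	if (dsl_dir_open("tank/home@yesterday", FTAG, &dd, &tail) == 0) {
 *		ASSERT(tail == NULL || tail[0] == '@');
 *		dsl_dir_close(dd, FTAG);
 *	}
 */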
uint64_t
dsl_dir_create_sync(dsl_dir_t *pds, const char *name, dmu_tx_t *tx)
{
	objset_t *mos = pds->dd_pool->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *dsphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (uint64_t), 1, &ddobj, tx));
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;

	dsphys->dd_creation_time = gethrestime_sec();
	dsphys->dd_parent_obj = pds->dd_object;
	dsphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds. Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it. The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val, obj;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	val = 0;
	dsl_dir_set_reservation_sync(dd, &val, tx);
	ASSERT3U(dd->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

void
dsl_dir_create_root(objset_t *mos, uint64_t *ddobjp, dmu_tx_t *tx)
{
	dsl_dir_phys_t *dsp;
	dmu_buf_t *dbuf;
	int error;

	*ddobjp = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);

	error = zap_add(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ROOT_DATASET,
	    sizeof (uint64_t), 1, ddobjp, tx);
	ASSERT3U(error, ==, 0);

	VERIFY(0 == dmu_bonus_hold(mos, *ddobjp, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsp = dbuf->db_data;

	dsp->dd_creation_time = gethrestime_sec();
	dsp->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	dsp->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);

	dmu_buf_rele(dbuf, FTAG);
}

void
dsl_dir_stats(dsl_dir_t *dd, dmu_objset_stats_t *dds)
{
	bzero(dds, sizeof (dmu_objset_stats_t));

	dds->dds_available = dsl_dir_space_available(dd, NULL, 0, TRUE);

	mutex_enter(&dd->dd_lock);
	dds->dds_space_used = dd->dd_used_bytes;
	dds->dds_compressed_bytes = dd->dd_phys->dd_compressed_bytes;
	dds->dds_uncompressed_bytes = dd->dd_phys->dd_uncompressed_bytes;
	dds->dds_quota = dd->dd_phys->dd_quota;
	dds->dds_reserved = dd->dd_phys->dd_reserved;
	mutex_exit(&dd->dd_lock);

	dds->dds_creation_time = dd->dd_phys->dd_creation_time;

	if (dd->dd_phys->dd_clone_parent_obj) {
		dsl_dataset_t *ds;

		rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
		VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool,
		    dd->dd_phys->dd_clone_parent_obj,
		    NULL, DS_MODE_NONE, FTAG, &ds));
		dsl_dataset_name(ds, dds->dds_clone_of);
		dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
		rw_exit(&dd->dd_pool->dp_config_rwlock);
	}
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

/*
 * How much of 'used' plus 'delta' is newly charged to our parent: the
 * parent already accounts MAX(used, dd_reserved) for us, so changes
 * within an unused reservation don't propagate upward.
 */
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}
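/*
 * Worked example for parent_delta() (hypothetical numbers): with
 * dd_reserved == 100 and used == 80, the parent already accounts 100
 * for this dir. A delta of +30 makes used 110, so parent_delta()
 * returns MAX(110, 100) - MAX(80, 100) = 10: only the 10 bytes beyond
 * the reservation are newly charged to the parent.
 */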
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	dd->dd_phys->dd_used_bytes = dd->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

/* on-disk usage plus the pending writes for all open txgs */
static uint64_t
dsl_dir_estimated_space(dsl_dir_t *dd)
{
	int64_t space;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	space = dd->dd_phys->dd_used_bytes;
	ASSERT(space >= 0);
	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it? If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
static uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	if (ondiskonly) {
		used = dd->dd_used_bytes;
	} else {
		used = dsl_dir_estimated_space(dd);
	}
	if (dd == ancestor)
		used += delta;

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;

		/*
		 * While it's OK to be a little over quota, if
		 * we think we are using more space than there
		 * is in the pool (which is already 1.6% more than
		 * dsl_pool_adjustedsize()), something is very
		 * wrong.
		 */
		ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}
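/*
 * Worked example for dsl_dir_space_available() (hypothetical numbers):
 * a dir with quota == 1000 and used == 400 whose parent can provide 200
 * more bytes reports MIN(200, 1000 - 400) == 200 available. If the dir
 * also had dd_reserved == 500, the 100 unused reserved bytes would be
 * added to the parent's contribution first, yielding
 * MIN(300, 600) == 300.
 */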
struct tempreserve {
	list_node_t tr_node;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

/*
 * Recursive guts of dsl_dir_tempreserve_space(): reserve space in this
 * dsl_dir and, as dictated by parent_delta(), in its ancestors, to be
 * used in this tx's txg.
 */
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd,
    uint64_t asize, boolean_t netfree, list_t *tr_list, dmu_tx_t *tx)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_used, quota, parent_rsrv;
	int edquot = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	struct tempreserve *tr;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >=, 0);

	mutex_enter(&dd->dd_lock);
	/*
	 * Check against the dsl_dir's quota. We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_used = dsl_dir_estimated_space(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_used += dd->dd_tempreserved[i];

	quota = UINT64_MAX;

	if (dd->dd_phys->dd_quota)
		quota = dd->dd_phys->dd_quota;

	/*
	 * If this transaction will result in a net free of space, we want
	 * to let it through, but we have to be careful: the space that it
	 * frees won't become available until *after* this txg syncs.
	 * Therefore, to ensure that it's possible to remove files from
	 * a full pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop. In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		if (poolsize < quota) {
			quota = poolsize;
			edquot = ENOSPC;
		}
	} else if (netfree) {
		quota = UINT64_MAX;
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk usage is over quota and there are no pending changes
	 * (which may free up space for us).
	 */
	if (asize > 0 && est_used > quota) {
		if (dd->dd_space_towrite[txg & TXG_MASK] != 0 ||
		    dd->dd_space_towrite[(txg-1) & TXG_MASK] != 0 ||
		    dd->dd_space_towrite[(txg-2) & TXG_MASK] != 0 ||
		    dd->dd_used_bytes < quota)
			edquot = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK est_used = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    dd->dd_used_bytes>>10, est_used>>10,
		    quota>>10, asize>>10, edquot);
		mutex_exit(&dd->dd_lock);
		return (edquot);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, est_used, asize);
	mutex_exit(&dd->dd_lock);

	tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, tr_list, tx));
	} else {
		return (0);
	}
}
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and thus
 * dsl_dir_willuse_space() has been called), the reservation should
 * be canceled, using dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize,
    uint64_t asize, uint64_t fsize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err = 0;
	list_t *tr_list;

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >=, 0);
	ASSERT3S(fsize, >=, 0);

	err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
	    tr_list, tx);

	if (err == 0) {
		struct tempreserve *tr;

		err = arc_tempreserve_space(lsize);
		if (err == 0) {
			tr = kmem_alloc(sizeof (struct tempreserve), KM_SLEEP);
			tr->tr_ds = NULL;
			tr->tr_size = lsize;
			list_insert_tail(tr_list, tr);
		}
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;
	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	while (tr = list_head(tr_list)) {
		if (tr->tr_ds == NULL) {
			arc_tempreserve_clear(tr->tr_size);
		} else {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}
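/*
 * Lifecycle sketch (hypothetical caller and sizes): a caller reserving
 * space for a write pairs the two calls around the dirtying work:
 *
 *	void *tr_cookie;
 *	err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize,
 *	    &tr_cookie, tx);
 *	if (err == 0) {
 *		dsl_dir_willuse_space(dd, asize, tx);	(dirty the data)
 *		dsl_dir_tempreserve_clear(tr_cookie, tx);
 *	}
 */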
/*
 * Call in open context when we think we're going to write/free space,
 * e.g. when dirtying data. Be conservative (i.e. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_estimated_space(dd);
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_dirty(dd, tx);

	mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent,
		    accounted_delta, compressed, uncompressed, tx);
	}
}

/* ARGSUSED */
static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;
	int err = 0;
	uint64_t towrite;

	if (new_quota == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dd->dd_space_towrite[0] + dd->dd_space_towrite[1] +
	    dd->dd_space_towrite[2] + dd->dd_space_towrite[3];
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (new_quota < dd->dd_phys->dd_reserved ||
	    new_quota < dsl_dir_estimated_space(dd))) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = new_quota;
	mutex_exit(&dd->dd_lock);
}

int
dsl_dir_set_quota(const char *ddname, uint64_t quota)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	/*
	 * If someone removes a file, then tries to set the quota, we
	 * want to make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, dd, &quota, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}
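/*
 * Usage sketch (hypothetical name and size):
 *
 *	err = dsl_dir_set_quota("tank/home", 10ULL << 30);	(10 GiB)
 *
 * Passing 0 removes the quota entirely (see the new_quota == 0 check
 * above).
 */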
/* ARGSUSED */
static int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used, avail;
	int64_t delta;

	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (delta > 0 && delta > avail)
		return (ENOSPC);
	if (delta > 0 && dd->dd_phys->dd_quota > 0 &&
	    new_reservation > dd->dd_phys->dd_quota)
		return (ENOSPC);
	return (0);
}

static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t used;
	int64_t delta;

	mutex_enter(&dd->dd_lock);
	used = dd->dd_used_bytes;
	delta = MAX(used, new_reservation) -
	    MAX(used, dd->dd_phys->dd_reserved);
	mutex_exit(&dd->dd_lock);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_reserved = new_reservation;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, delta, 0, 0, tx);
	}
}

int
dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
{
	dsl_dir_t *dd;
	int err;

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err)
		return (err);
	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, dd, &reservation, 0);
	dsl_dir_close(dd, FTAG);
	return (err);
}

static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor? Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
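/*
 * Example (hypothetical hierarchy): for tank/a/b and tank/a/c,
 * closest_common_ancestor() returns tank/a, so a space transfer from b
 * to c only needs to be re-checked below tank/a. would_change() walks
 * from b up to tank/a, shrinking the delta at each level by whatever
 * is absorbed by that dir's reservation (via parent_delta()).
 */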
struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};

/* ARGSUSED */
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/* There should be 2 references: the open and the dirty */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		uint64_t myspace =
		    MAX(dd->dd_used_bytes, dd->dd_phys->dd_reserved);

		dsl_dir_diduse_space(dd->dd_parent, -myspace,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, myspace,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);
}

int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}
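/*
 * Usage sketch for dsl_dir_rename() (hypothetical names): renaming
 * tank/a/b to tank/c/b. The caller passes the dsl_dir_t for tank/a/b
 * and the full new name; dsl_dir_open() above resolves tank/c as the
 * new parent and leaves "b" in ra.mynewname:
 *
 *	err = dsl_dir_rename(dd, "tank/c/b");
 */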