/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

uint64_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

uint64_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}
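/*
 * Each *_changed_cb below follows the same pattern: dsl_prop_register()
 * invokes the callback once before returning (see the note in
 * dmu_objset_open_impl()) and again on every subsequent property change,
 * so the callback just caches the already-validated value in the objset.
 * A minimal sketch for a hypothetical "example" property (the callback
 * name and os_example field are illustrative, not part of this file):
 *
 *	static void
 *	example_changed_cb(void *arg, uint64_t newval)
 *	{
 *		objset_t *os = arg;
 *		os->os_example = newval;
 *	}
 */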
static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}
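/*
 * Instantiate the in-core objset for dataset "ds" (or for the MOS when
 * ds is NULL), reading the objset_phys_t that "bp" points to, or
 * zero-filling a fresh one if bp is a hole.  The caller must hold
 * ds_opening_lock.
 */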
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!dsl_dataset_is_snapshot(ds)) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
		}
		if (err != 0) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf));
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = 0;
		os->os_logbias = 0;
		os->os_sync = 0;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !dsl_dataset_is_snapshot(ds))
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	DMU_META_DNODE(os) = dnode_special_open(os,
	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
	    &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		DMU_USERUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
		    &os->os_userused_dnode);
		DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
		    &os->os_groupused_dnode);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_objset == NULL);
		ds->ds_objset = os;
		mutex_exit(&ds->ds_lock);
	}

	*osp = os;
	return (0);
}
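/*
 * Return the objset for "ds", creating it on first use and caching it in
 * ds->ds_objset.  ds_opening_lock serializes concurrent openers.
 */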
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	*osp = ds->ds_objset;
	if (*osp == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), osp);
	}
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}

/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}
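/*
 * Example (sketch): dmu_objset_hold()/dmu_objset_rele() bracket short,
 * read-only access to an objset by name; the dataset name and the work
 * done under the hold are illustrative:
 *
 *	objset_t *os;
 *	int error = dmu_objset_hold("tank/fs", FTAG, &os);
 *	if (error == 0) {
 *		(void) dmu_objset_type(os);
 *		dmu_objset_rele(os, FTAG);
 *	}
 */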
/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	dsl_pool_rele(dp, FTAG);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn;

	mutex_enter(&os->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&os->os_dnodes, DMU_META_DNODE(os));
	list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs, so it is
	 * OK to skip.
	 */
	for (dn = list_head(&os->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&os->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&os->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&os->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&os->os_lock);
		dn = next_dn;
	}
	mutex_exit(&os->os_lock);
}
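/*
 * Tear down the in-core state of an objset: unregister the property
 * callbacks, evict all dbufs and the special dnodes, and free the
 * structure.  No txg may still have dirty data for this objset.
 */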
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    checksum_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
			    compression_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COPIES),
			    copies_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    dedup_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
			    logbias_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_SYNC),
			    sync_changed_cb, os));
		}
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os));
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
		    secondary_cache_changed_cb, os));
	}

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf));

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
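/*
 * A worked example of the meta-dnode sizing loop below, with illustrative
 * numbers (the usual constants: 16K dnode blocks, 512-byte dnode_phys_t,
 * and 16K indirect blocks holding 128 of the 128-byte block pointers
 * each): level 1 addresses dn_nblkptr * 16K bytes of dnodes, and every
 * additional level multiplies that by 128, so a handful of levels is
 * enough to cover DN_MAX_OBJECT dnodes.
 */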
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}

typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	dsl_dir_rele(pdd, FTAG);

	return (0);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca, 5));
}
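/*
 * Example (sketch): dmu_objset_create() runs the check/sync pair above as
 * a dsl_sync_task.  A caller creating a filesystem-type objset might look
 * like this; the name, callback and argument are illustrative:
 *
 *	error = dmu_objset_create("tank/newfs", DMU_OST_ZFS, 0,
 *	    my_create_cb, my_arg);
 */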
typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	/* You can't clone across pools. */
	if (pdd->dd_pool != dp) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EXDEV));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can't clone across pools. */
	if (origin->ds_dir->dd_pool != dp) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/* You can only clone snapshots, not the head datasets. */
	if (!dsl_dataset_is_snapshot(origin)) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[MAXNAMELEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca, 5));
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}
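/*
 * arc_write() callbacks for the objset's root block: the "ready" callback
 * runs just before the block is issued and fixes up the fill count in the
 * block pointer; the "done" callback runs after the write completes and
 * updates the dataset's block accounting.
 */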
/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT3P(bp, ==, os->os_rootbp);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
	    dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while (dn = list_head(list)) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta is zero.  Otherwise the block of the zap
		 * object could be shared between datasets, but would
		 * need to differ between them after a bprewrite.
		 */

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE, tx);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}
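/*
 * Accounting above is symmetric: for each synced dnode the old
 * (user, group) pair is debited and the new pair is credited, each by
 * DNODE_SIZE plus the space the object consumes.  For example
 * (illustrative numbers), a chown of an object using 1536 bytes debits
 * 512 + 1536 bytes from the old uid and credits the same amount to the
 * new uid in the USERUSED/GROUPUSED zaps.
 */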
/*
 * Returns a pointer to data to find uid/gid from.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}
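/*
 * Record the uid/gid that an object is charged to, reading them from the
 * bonus buffer or the spill block via the type's registered used_cb.
 * With before == B_TRUE this captures the pre-change ("old") ids;
 * otherwise the post-change ("new") ids.
 */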
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track.
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
	else
		return (B_FALSE);
}
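/*
 * Look up "name" in the snapshot namespace of "os" and return its
 * canonical spelling in "real" (useful with case-insensitive matching);
 * *conflict is set if normalization makes the match ambiguous.
 */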
int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 */
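/*
 * Example (sketch): visiting every dataset and snapshot in the pool
 * starting from the root dsl_dir; "my_cb" is an illustrative callback
 * with the int (*)(dsl_pool_t *, dsl_dataset_t *, void *) signature:
 *
 *	error = dmu_objset_find_dp(dp, dp->dp_root_dir_obj, my_cb, NULL,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */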
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
	if (err != 0)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			err = dmu_objset_find_dp(dp, attr->za_first_integer,
			    func, arg, flags);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		dsl_dataset_t *ds;
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = func(dp, ds, arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0)
		return (err);

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
	if (err != 0)
		return (err);
	err = func(dp, ds, arg);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least MAXNAMELEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}