/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to override the maximum number of threads used to parallelize
 * dmu_objset_find_dp(), needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;

static void dmu_objset_find_dp_cb(void *arg);

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa, newval,
	    ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}

int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_alloc_buf(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_alloc_buf(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		boolean_t needlock = B_FALSE;

		/*
		 * Note: it's valid to open the objset if the dataset is
		 * long-held, in which case the pool_config lock will not
		 * be held.
		 */
		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
			needlock = B_TRUE;
			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		}
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
		}
		if (needlock)
			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (err != 0) {
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We shouldn't be doing anything with dsl_dataset_t's unless the
	 * pool_config lock is held, or the dataset is long-held.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
	    dsl_dataset_long_held(ds));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}

/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}

static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	int err;

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}
	err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
	dsl_pool_rele(dp, FTAG);

	return (err);
}

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own_obj(dp, obj, tag, &ds);
	if (err != 0)
		return (err);

	return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock, so that no intervening namespace or
 * ownership changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds, *newds;
	char name[ZFS_MAX_DATASET_NAME_LEN];

	ds = os->os_dsl_dataset;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = dmu_objset_pool(os);
	dsl_pool_config_enter(dp, FTAG);
	dmu_objset_disown(os, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
	VERIFY3P(newds, ==, os->os_dsl_dataset);
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t dn_marker;
	dnode_t *dn;

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, &dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, &dn_marker);
			list_remove(&os->os_dnodes, &dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds)
		dsl_prop_unregister_all(ds, os);

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}

void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.  Note that in order to
		 * ensure that we do not overflow 64 bits, there has to be
		 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
		 * but < 2^64.  Therefore,
		 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
		 * less than (64 - log2(DN_MAX_OBJECT)) (16).
		 */
		while ((uint64_t)mdn->dn_nblkptr <<
		    (mdn->dn_datablkshift - DNODE_SHIFT +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT)
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}

typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	dsl_dir_rele(pdd, FTAG);

	return (error);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}

typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT3P(bp, ==, os->os_rootbp);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
	    os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while (dn = list_head(list)) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta is zero.  Otherwise
		 * the block of the zap obj could be shared between
		 * datasets but need to be different between them after
		 * a bprewrite.
		 */

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE, tx);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}

/*
 * Returns a pointer to the data from which to find the uid/gid.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}

void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_FIRST, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

typedef struct dmu_objset_find_ctx {
	taskq_t		*dc_tq;
	dsl_pool_t	*dc_dp;
	uint64_t	dc_ddobj;
	int		(*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
	void		*dc_arg;
	int		dc_flags;
	kmutex_t	*dc_error_lock;
	int		*dc_error;
} dmu_objset_find_ctx_t;

static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dmu_objset_find_ctx_t *child_dcp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, NULL, FTAG, &dd);
	if (err != 0)
		goto out;

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (dcp->dc_flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child_dcp = kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
			*child_dcp = *dcp;
			child_dcp->dc_ddobj = attr->za_first_integer;
			if (dcp->dc_tq != NULL)
				(void) taskq_dispatch(dcp->dc_tq,
				    dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
			else
				dmu_objset_find_dp_impl(child_dcp);
		}
		zap_cursor_fini(&zc);
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
		dsl_dataset_t *ds;
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = dcp->dc_func(dp, ds, dcp->dc_arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0)
		goto out;

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
	if (err != 0)
		goto out;
	err = dcp->dc_func(dp, ds, dcp->dc_arg);
	dsl_dataset_rele(ds, FTAG);

out:
	if (err != 0) {
		mutex_enter(dcp->dc_error_lock);
		/* only keep first error */
		if (*dcp->dc_error == 0)
			*dcp->dc_error = err;
		mutex_exit(dcp->dc_error_lock);
	}

	kmem_free(dcp, sizeof (*dcp));
}

static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * assert(pool_config_held) calls down the stack.  Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer.  This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	int error = 0;
	taskq_t *tq = NULL;
	int ntasks;
	dmu_objset_find_ctx_t *dcp;
	kmutex_t err_lock;

	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
	dcp->dc_tq = NULL;
	dcp->dc_dp = dp;
	dcp->dc_ddobj = ddobj;
	dcp->dc_func = func;
	dcp->dc_arg = arg;
	dcp->dc_flags = flags;
	dcp->dc_error_lock = &err_lock;
	dcp->dc_error = &error;

	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * In case a write lock is held we can't make use of
		 * parallelism, as down the stack of the worker threads
		 * the lock is asserted via dsl_pool_config_held.
		 * In case of a read lock this is solved by getting a read
		 * lock in each worker thread, which isn't possible in case
		 * of a writer lock.  So we fall back to the synchronous path
		 * here.
		 * In the future it might be possible to get some magic into
		 * dsl_pool_config_held in a way that it returns true for
		 * the worker threads so that a single lock held from this
		 * thread suffices.  For now, stay single threaded.
		 */
		dmu_objset_find_dp_impl(dcp);
		mutex_destroy(&err_lock);

		return (error);
	}

	ntasks = dmu_find_threads;
	if (ntasks == 0)
		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
	tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
	    INT_MAX, 0);
	if (tq == NULL) {
		kmem_free(dcp, sizeof (*dcp));
		mutex_destroy(&err_lock);

		return (SET_ERROR(ENOMEM));
	}
	dcp->dc_tq = tq;

	/* dcp will be freed by task */
	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active. As
	 * we always queue new tasks from within other tasks, taskq_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
	taskq_wait(tq);
	taskq_destroy(tq);
	mutex_destroy(&err_lock);

	return (error);
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}