/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

uint64_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

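/*
 * The dedup property value encodes both a checksum function and an
 * optional verify flag: zio_checksum_dedup_select() may OR
 * ZIO_CHECKSUM_VERIFY into its result, so the callback below splits
 * that result into os_dedup_checksum and os_dedup_verify.
 */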
static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}

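/*
 * Construct the in-core objset_t for a dataset: read the root block
 * (if any) through the ARC, register the property callbacks, create
 * the per-txg dirty dnode lists, and open the special meta-dnode
 * (and, where present, the user/group used dnodes).  Called with
 * ds_opening_lock held, or with ds == NULL for the meta-objset.
 */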
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		/*
		 * NB: when bprewrite scrub can change the bp, and this
		 * is called from dmu_objset_open_ds_os, the bp could
		 * change underneath us, and we'll need a lock.
		 */
		err = arc_read_nolock(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = EIO;
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds) {
		err = dsl_prop_register(ds, "primarycache",
		    primary_cache_changed_cb, os);
		if (err == 0)
			err = dsl_prop_register(ds, "secondarycache",
			    secondary_cache_changed_cb, os);
		if (!dsl_dataset_is_snapshot(ds)) {
			if (err == 0)
				err = dsl_prop_register(ds, "checksum",
				    checksum_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "compression",
				    compression_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "copies",
				    copies_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "dedup",
				    dedup_changed_cb, os);
			if (err == 0)
				err = dsl_prop_register(ds, "logbias",
				    logbias_changed_cb, os);
		}
		if (err) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf) == 1);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_LZJB;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = 0;
		os->os_logbias = 0;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	os->os_meta_dnode = dnode_special_open(os,
	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		os->os_userused_dnode = dnode_special_open(os,
		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT);
		os->os_groupused_dnode = dnode_special_open(os,
		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT);
	}

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock.
	 */
	if (ds) {
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_objset == NULL);
		ds->ds_objset = os;
		mutex_exit(&ds->ds_lock);
	}

	*osp = os;
	return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	*osp = ds->ds_objset;
	if (*osp == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, osp);
	}
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}

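/*
 * There are two ways to get at an objset: dmu_objset_hold() takes a
 * plain reference for inspection, while dmu_objset_own() also takes
 * ownership of the underlying dataset (via dsl_dataset_own()) and
 * enforces the expected type and read-only semantics.  An
 * illustrative (hypothetical) read-only consumer:
 *
 *	objset_t *os;
 *	if (dmu_objset_hold("tank/fs", FTAG, &os) == 0) {
 *		dmu_objset_type_t type = dmu_objset_type(os);
 *		dmu_objset_rele(os, FTAG);
 *	}
 */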
/* called from zpl */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(name, tag, &ds);
	if (err)
		return (err);

	err = dmu_objset_from_ds(ds, osp);
	if (err)
		dsl_dataset_rele(ds, tag);

	return (err);
}

/* called from zpl */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
	if (err)
		return (err);

	err = dmu_objset_from_ds(ds, osp);
	if (err) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dmu_objset_disown(*osp, tag);
		return (EINVAL);
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dmu_objset_disown(*osp, tag);
		return (EROFS);
	}
	return (err);
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_dataset_rele(os->os_dsl_dataset, tag);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}

int
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t *dn;

	mutex_enter(&os->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&os->os_dnodes, os->os_meta_dnode);
	list_insert_tail(&os->os_dnodes, os->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If a dnode has no holds then it has no dbufs, so it
	 * is OK to skip it.
	 */
	for (dn = list_head(&os->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&os->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&os->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&os->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&os->os_lock);
		dn = next_dn;
	}
	mutex_exit(&os->os_lock);
	return (list_head(&os->os_dnodes) != os->os_meta_dnode);
}

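/*
 * Tear down an objset in roughly the reverse order of
 * dmu_objset_open_impl(): unregister the property callbacks, evict
 * all dbufs, close the special dnodes, free the ZIL, and drop the
 * reference on the phys buffer.  The objset must already be clean in
 * every txg.
 */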
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!dsl_dataset_is_snapshot(ds)) {
			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
			    checksum_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "compression",
			    compression_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "copies",
			    copies_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
			    dedup_changed_cb, os));
			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
			    logbias_changed_cb, os));
		}
		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
		    primary_cache_changed_cb, os));
		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
		    secondary_cache_changed_cb, os));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	(void) dmu_objset_evict_dbufs(os);

	dnode_special_close(os->os_meta_dnode);
	if (os->os_userused_dnode) {
		dnode_special_close(os->os_userused_dnode);
		dnode_special_close(os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);
	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	if (ds)
		mutex_enter(&ds->ds_opening_lock);
	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &os));
	if (ds)
		mutex_exit(&ds->ds_opening_lock);
	mdn = os->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the
		 * meta-dnode to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}

struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_origin;
	const char *lastname;
	dmu_objset_type_t type;
	uint64_t flags;
};

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_origin != NULL) {
		/* You can't clone across pools. */
		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/* You can only clone snapshots, not the head datasets. */
		if (!dsl_dataset_is_snapshot(oa->clone_origin))
			return (EINVAL);
	}

	return (0);
}

static void
dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_origin, oa->flags, cr, tx);

	if (oa->clone_origin == NULL) {
		dsl_dataset_t *ds;
		blkptr_t *bp;
		objset_t *os;

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj,
		    FTAG, &ds));
		bp = dsl_dataset_get_blkptr(ds);
		ASSERT(BP_IS_HOLE(bp));

		os = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(os, oa->userarg, cr, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dsobj);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	oa.flags = flags;

	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}

int
dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	oa.lastname = tail;
	oa.clone_origin = clone_origin;
	oa.flags = flags;

	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}

int
dmu_objset_destroy(const char *name, boolean_t defer)
{
	dsl_dataset_t *ds;
	int error;

	/*
	 * dsl_dataset_destroy() can free any claimed-but-unplayed
	 * intent log, but if there is an active log, it has blocks that
	 * are allocated, but may not yet be reflected in the on-disk
	 * structure.  Only the ZIL knows how to free them, so we have
	 * to call into it here.
	 */
	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
	if (error == 0) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) == 0)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		error = dsl_dataset_destroy(ds, FTAG, defer);
		/* dsl_dataset_destroy() closes the ds. */
	}

	return (error);
}

struct snaparg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char failed[MAXPATHLEN];
	boolean_t checkperms;
	nvlist_t *props;
};

static int
snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	struct snaparg *sn = arg2;

	/* The props have already been checked by zfs_check_userprops(). */

	return (dsl_dataset_snapshot_check(os->os_dsl_dataset,
	    sn->snapname, tx));
}

static void
snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	struct snaparg *sn = arg2;

	dsl_dataset_snapshot_sync(ds, sn->snapname, cr, tx);

	if (sn->props)
		dsl_props_set_sync(ds->ds_prev, sn->props, cr, tx);
}

static int
dmu_objset_snapshot_one(char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	int err;

	(void) strcpy(sn->failed, name);

	/*
	 * Check permissions only when requested.  This only applies when
	 * doing a recursive snapshot.  The permission checks for the
	 * starting dataset have already been performed in
	 * zfs_secpolicy_snapshot().
	 */
	if (sn->checkperms == B_TRUE &&
	    (err = zfs_secpolicy_snapshot_perms(name, CRED())))
		return (err);

	err = dmu_objset_hold(name, sn, &os);
	if (err != 0)
		return (err);

	/* If the objset is in an inconsistent state, return busy. */
	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
		dmu_objset_rele(os, sn);
		return (EBUSY);
	}

	/*
	 * NB: we need to wait for all in-flight changes to get to disk,
	 * so that we snapshot those changes.  zil_suspend does this as
	 * a side effect.
	 */
	err = zil_suspend(dmu_objset_zil(os));
	if (err == 0) {
		dsl_sync_task_create(sn->dstg, snapshot_check,
		    snapshot_sync, os, sn, 3);
	} else {
		dmu_objset_rele(os, sn);
	}

	return (err);
}

int
dmu_objset_snapshot(char *fsname, char *snapname,
    nvlist_t *props, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg sn;
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;
	sn.props = props;

	if (recursive) {
		sn.checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		sn.checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(sn.dstg);

	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		dsl_dataset_t *ds = os->os_dsl_dataset;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn.failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_rele(os, &sn);
	}

	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(bp == os->os_rootbp);
	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
	ASSERT(BP_GET_LEVEL(bp) == 0);

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp,
	    dmu_objset_write_ready, dmu_objset_write_done, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	os->os_meta_dnode->dn_zio = zio;
	dnode_sync(os->os_meta_dnode, tx);

	os->os_phys->os_flags = os->os_flags;

	if (os->os_userused_dnode &&
	    os->os_userused_dnode->dn_type != DMU_OT_NONE) {
		os->os_userused_dnode->dn_zio = zio;
		dnode_sync(os->os_userused_dnode, tx);
		os->os_groupused_dnode->dn_zio = zio;
		dnode_sync(os->os_groupused_dnode, tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] &&
	    os->os_userused_dnode);
}

static void
do_userquota_callback(objset_t *os, dnode_phys_t *dnp,
    boolean_t subtract, dmu_tx_t *tx)
{
	static const char zerobuf[DN_MAX_BONUSLEN] = {0};
	uint64_t user, group;

	ASSERT(dnp->dn_type != 0 ||
	    (bcmp(DN_BONUS(dnp), zerobuf, DN_MAX_BONUSLEN) == 0 &&
	    DN_USED_BYTES(dnp) == 0));

	if ((dnp->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) &&
	    0 == used_cbs[os->os_phys->os_type](dnp->dn_bonustype,
	    DN_BONUS(dnp), &user, &group)) {
		int64_t delta = DNODE_SIZE + DN_USED_BYTES(dnp);
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}

void
dmu_objset_do_userquota_callbacks(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while (dn = list_head(list)) {
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_oldphys);
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (os->os_userused_dnode->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta (due to phys-oldphys) is zero.  Otherwise
		 * the block of the zap obj could be shared between
		 * datasets but need to be different between them after
		 * a bprewrite.
		 */
		do_userquota_callback(os, dn->dn_oldphys, B_TRUE, tx);
		do_userquota_callback(os, dn->dn_phys, B_FALSE, tx);

		/*
		 * The mutex is needed here for interlock with
		 * dnode_allocate.
		 */
		mutex_enter(&dn->dn_mtx);
		zio_buf_free(dn->dn_oldphys, sizeof (dnode_phys_t));
		dn->dn_oldphys = NULL;
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (ENOTSUP);
	if (dmu_objset_is_snapshot(os))
		return (EINVAL);

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and thus accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (EINTR);

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

struct findarg {
	int (*func)(char *, void *);
	void *arg;
};

/* ARGSUSED */
static int
findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct findarg *fa = arg;
	return (fa->func((char *)dsname, fa->arg));
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * Perhaps change all callers to use dmu_objset_find_spa()?
 */
int
dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
{
	struct findarg fa;
	fa.func = func;
	fa.arg = arg;
	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
}

/*
 * Find all objsets under name, call func on each
 */
int
dmu_objset_find_spa(spa_t *spa, const char *name,
    int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	if (name == NULL)
		name = spa_name(spa);
	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_close(dd, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
	dp = dd->dd_pool;

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr->za_integer_length == sizeof (uint64_t));
			ASSERT(attr->za_num_integers == 1);

			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			(void) strcpy(child, name);
			(void) strcat(child, "/");
			(void) strcat(child, attr->za_name);
			err = dmu_objset_find_spa(spa, child, func, arg,
			    flags);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		if (!dsl_pool_sync_context(dp))
			rw_enter(&dp->dp_config_rwlock, RW_READER);
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
		if (!dsl_pool_sync_context(dp))
			rw_exit(&dp->dp_config_rwlock);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT(attr->za_integer_length ==
				    sizeof (uint64_t));
				ASSERT(attr->za_num_integers == 1);

				child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
				(void) strcpy(child, name);
				(void) strcat(child, "@");
				(void) strcat(child, attr->za_name);
				err = func(spa, attr->za_first_integer,
				    child, arg);
				kmem_free(child, MAXPATHLEN);
				if (err)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_close(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	err = func(spa, thisobj, name, arg);
	return (err);
}

/* ARGSUSED */
int
dmu_objset_prefetch(char *name, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_hold(name, FTAG, &ds))
		return (0);

	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
		mutex_enter(&ds->ds_opening_lock);
		if (ds->ds_objset == NULL) {
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
			    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

			(void) arc_read_nolock(NULL, dsl_dataset_get_spa(ds),
			    &ds->ds_phys->ds_bp, NULL, NULL,
			    ZIO_PRIORITY_ASYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		mutex_exit(&ds->ds_opening_lock);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}