/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zio_checksum.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(osi->os_spa));

	osi->os_copies = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}

int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_impl_t **osip)
{
	objset_impl_t *osi;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	osi->os_rootbp = bp;
	if (!BP_IS_HOLE(osi->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		zb.zb_objset = ds ? ds->ds_object : 0;
		zb.zb_object = 0;
		zb.zb_level = -1;
		zb.zb_blkid = 0;

		dprintf_bp(osi->os_rootbp, "reading %s", "");
		/*
		 * NB: when bprewrite scrub can change the bp,
		 * and this is called from dmu_objset_open_ds_os, the bp
		 * could change, and we'll need a lock.
		 */
		err = arc_read_nolock(NULL, spa, osi->os_rootbp,
		    arc_getbuf_func, &osi->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
		osi->os_phys = osi->os_phys_buf->b_data;
	} else {
		osi->os_phys_buf = arc_buf_alloc(spa, sizeof (objset_phys_t),
		    &osi->os_phys_buf, ARC_BUFC_METADATA);
		osi->os_phys = osi->os_phys_buf->b_data;
		bzero(osi->os_phys, sizeof (objset_phys_t));
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know, and
	 * registering would complicate clone promotion.
	 */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		err = dsl_prop_register(ds, "checksum",
		    checksum_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "compression",
			    compression_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "copies",
			    copies_changed_cb, osi);
		if (err) {
			VERIFY(arc_buf_remove_ref(osi->os_phys_buf,
			    &osi->os_phys_buf) == 1);
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
		osi->os_copies = spa_max_replication(spa);
	}

	osi->os_zil_header = osi->os_phys->os_zil_header;
	osi->os_zil = zil_alloc(&osi->os, &osi->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		VERIFY(NULL == dsl_dataset_set_user_ptr(ds, osi,
		    dmu_objset_evict));
	}

	*osip = osi;
	return (0);
}

static int
dmu_objset_open_ds_os(dsl_dataset_t *ds, objset_t *os, dmu_objset_type_t type)
{
	objset_impl_t *osi;

	mutex_enter(&ds->ds_opening_lock);
	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		int err;

		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, &osi);
		if (err) {
			mutex_exit(&ds->ds_opening_lock);
			return (err);
		}
	}
	mutex_exit(&ds->ds_opening_lock);

	os->os = osi;
	os->os_mode = DS_MODE_NOHOLD;

	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type)
		return (EINVAL);
	return (0);
}

int
dmu_objset_open_ds(dsl_dataset_t *ds, dmu_objset_type_t type, objset_t **osp)
{
	objset_t *os;
	int err;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dmu_objset_open_ds_os(ds, os, type);
	if (err)
		kmem_free(os, sizeof (objset_t));
	else
		*osp = os;
	return (err);
}

/* called from zpl */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	objset_t *os;
	dsl_dataset_t *ds;
	int err;

	ASSERT(DS_MODE_TYPE(mode) == DS_MODE_USER ||
	    DS_MODE_TYPE(mode) == DS_MODE_OWNER);

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	if (DS_MODE_TYPE(mode) == DS_MODE_USER)
		err = dsl_dataset_hold(name, os, &ds);
	else
		err = dsl_dataset_own(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	err = dmu_objset_open_ds_os(ds, os, type);
	if (err) {
		if (DS_MODE_TYPE(mode) == DS_MODE_USER)
			dsl_dataset_rele(ds, os);
		else
			dsl_dataset_disown(ds, os);
		kmem_free(os, sizeof (objset_t));
	} else {
		os->os_mode = mode;
		*osp = os;
	}
	return (err);
}

void
dmu_objset_close(objset_t *os)
{
	ASSERT(DS_MODE_TYPE(os->os_mode) == DS_MODE_USER ||
	    DS_MODE_TYPE(os->os_mode) == DS_MODE_OWNER ||
	    DS_MODE_TYPE(os->os_mode) == DS_MODE_NOHOLD);

	if (DS_MODE_TYPE(os->os_mode) == DS_MODE_USER)
		dsl_dataset_rele(os->os->os_dsl_dataset, os);
	else if (DS_MODE_TYPE(os->os_mode) == DS_MODE_OWNER)
		dsl_dataset_disown(os->os->os_dsl_dataset, os);
	kmem_free(os, sizeof (objset_t));
}
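
/*
 * Evict the dbufs of every dnode in this objset.  Returns nonzero if
 * any dnodes other than the meta-dnode remain on the dnode list
 * afterwards, i.e. if not everything could be evicted.
 */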
int
dmu_objset_evict_dbufs(objset_t *os)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&osi->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (list_head(&osi->os_dnodes) != osi->os_meta_dnode);
}

void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	if (ds && ds->ds_phys && ds->ds_phys->ds_num_children == 0) {
		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "copies",
		    copies_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os);

	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	VERIFY(arc_buf_remove_ref(osi->os_phys_buf, &osi->os_phys_buf) == 1);
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	mutex_destroy(&osi->os_user_ptr_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}

/* called from dsl for meta-objset */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	if (ds)
		mutex_enter(&ds->ds_opening_lock);
	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &osi));
	if (ds)
		mutex_exit(&ds->ds_opening_lock);
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}

struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_parent;
	const char *lastname;
	dmu_objset_type_t type;
	uint64_t flags;
};

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_parent != NULL) {
		/*
		 * You can't clone across pools.
		 */
		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/*
		 * You can only clone snapshots, not the head datasets.
		 */
		if (oa->clone_parent->ds_phys->ds_num_children == 0)
			return (EINVAL);
	}

	return (0);
}

static void
dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t *bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, oa->flags, cr, tx);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	if (BP_IS_HOLE(bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, cr, tx);
	}

	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dsobj);

	dsl_dataset_rele(ds, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type,
    objset_t *clone_parent, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	dprintf("name=%s\n", name);

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	oa.flags = flags;

	if (clone_parent != NULL) {
		/*
		 * You can't clone to a different type.
		 */
		if (clone_parent->os->os_phys->os_type != type) {
			dsl_dir_close(pdd, FTAG);
			return (EINVAL);
		}
		oa.clone_parent = clone_parent->os->os_dsl_dataset;
	}
	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}

int
dmu_objset_destroy(const char *name)
{
	objset_t *os;
	int error;

	/*
	 * If it looks like we'll be able to destroy it, and there's
	 * an unplayed replay log sitting around, destroy the log.
	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
	 * but the replay log objset is modified in open context.
	 */
	error = dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_OWNER|DS_MODE_READONLY|DS_MODE_INCONSISTENT, &os);
	if (error == 0) {
		dsl_dataset_t *ds = os->os->os_dsl_dataset;
		zil_destroy(dmu_objset_zil(os), B_FALSE);

		error = dsl_dataset_destroy(ds, os);
		/*
		 * dsl_dataset_destroy() closes the ds.
		 */
		kmem_free(os, sizeof (objset_t));
	}

	return (error);
}

/*
 * This will close the objset.
 */
int
dmu_objset_rollback(objset_t *os)
{
	int err;
	dsl_dataset_t *ds;

	ds = os->os->os_dsl_dataset;

	if (!dsl_dataset_tryown(ds, TRUE, os)) {
		dmu_objset_close(os);
		return (EBUSY);
	}

	err = dsl_dataset_rollback(ds, os->os->os_phys->os_type);

	/*
	 * NB: we close the objset manually because the rollback
	 * actually implicitly called dmu_objset_evict(), thus freeing
	 * the objset_impl_t.
	 */
	dsl_dataset_disown(ds, os);
	kmem_free(os, sizeof (objset_t));
	return (err);
}

struct snaparg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char failed[MAXPATHLEN];
	boolean_t checkperms;
	list_t objsets;
};

struct osnode {
	list_node_t node;
	objset_t *os;
};

static int
dmu_objset_snapshot_one(char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	int err;

	(void) strcpy(sn->failed, name);

	/*
	 * Check permissions only when requested.  This only applies when
	 * doing a recursive snapshot.  The permission checks for the starting
	 * dataset have already been performed in zfs_secpolicy_snapshot()
	 */
	if (sn->checkperms == B_TRUE &&
	    (err = zfs_secpolicy_snapshot_perms(name, CRED())))
		return (err);

	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_USER, &os);
	if (err != 0)
		return (err);

	/* If the objset is in an inconsistent state, return busy */
	if (os->os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
		dmu_objset_close(os);
		return (EBUSY);
	}

	/*
	 * NB: we need to wait for all in-flight changes to get to disk,
	 * so that we snapshot those changes.  zil_suspend does this as
	 * a side effect.
	 */
	err = zil_suspend(dmu_objset_zil(os));
	if (err == 0) {
		struct osnode *osn;
		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
		    dsl_dataset_snapshot_sync, os->os->os_dsl_dataset,
		    sn->snapname, 3);
		osn = kmem_alloc(sizeof (struct osnode), KM_SLEEP);
		osn->os = os;
		list_insert_tail(&sn->objsets, osn);
	} else {
		dmu_objset_close(os);
	}

	return (err);
}

int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct osnode *osn;
	struct snaparg sn = { 0 };
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;
	list_create(&sn.objsets, sizeof (struct osnode),
	    offsetof(struct osnode, node));

	if (recursive) {
		sn.checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		sn.checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn.failed);
	}

out:
	while (osn = list_head(&sn.objsets)) {
		list_remove(&sn.objsets, osn);
		zil_resume(dmu_objset_zil(osn->os));
		dmu_objset_close(osn->os);
		kmem_free(osn, sizeof (struct osnode));
	}
	list_destroy(&sn.objsets);

	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync()
		 * to accommodate meta-dnode
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);
		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;
	blkptr_t *bp = os->os_rootbp;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
	int i;

	ASSERT(bp == zio->io_bp);

	/*
	 * Update rootbp fill count.
	 */
	bp->blk_fill = 1;	/* count the meta-dnode */
	for (i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;

	BP_SET_TYPE(bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(bp, 0);

	/* We must do this after we've set the bp's type and level */
	if (!DVA_EQUAL(BP_IDENTITY(bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg)
			(void) dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio->io_bp_orig, NULL, os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, bp, os->os_synctx);
	}
}

/* called from dsl */
void
dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	writeprops_t wp = { 0 };
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg)) {
		(void) dsl_dataset_block_kill(os->os_dsl_dataset,
		    os->os_rootbp, pio, tx);
	}
	wp.wp_type = DMU_OT_OBJSET;
	wp.wp_copies = os->os_copies;
	wp.wp_level = (uint8_t)-1;
	wp.wp_oschecksum = os->os_checksum;
	wp.wp_oscompress = os->os_compress;
	arc_release(os->os_phys_buf, &os->os_phys_buf);
	zio = arc_write(pio, os->os_spa, &wp,
	    tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, NULL, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_METADATA,
	    &zb);

	/*
	 * Sync meta-dnode - the parent IO for the sync is the root block
	 */
	os->os_meta_dnode->dn_zio = zio;
	dnode_sync(os->os_meta_dnode, tx);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx);

	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os->os_phys->os_type;
	if (os->os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os->os_dsl_dataset ||
	    os->os->os_phys->os_type == DMU_OST_META);

	if (os->os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os->os_phys->os_type);
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

struct findarg {
	int (*func)(char *, void *);
	void *arg;
};

/* ARGSUSED */
static int
findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct findarg *fa = arg;
	return (fa->func((char *)dsname, fa->arg));
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * Perhaps change all callers to use dmu_objset_find_spa()?
 */
int
dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
{
	struct findarg fa;
	fa.func = func;
	fa.arg = arg;
	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
}

/*
 * Find all objsets under name, call func on each
 */
int
dmu_objset_find_spa(spa_t *spa, const char *name,
    int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	if (name == NULL)
		name = spa_name(spa);
	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_close(dd, FTAG);
		return (0);
	}

	thisobj = dd->dd_phys->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
	dp = dd->dd_pool;

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr->za_integer_length == sizeof (uint64_t));
			ASSERT(attr->za_num_integers == 1);

			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			(void) strcpy(child, name);
			(void) strcat(child, "/");
			(void) strcat(child, attr->za_name);
			err = dmu_objset_find_spa(spa, child, func, arg, flags);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		if (!dsl_pool_sync_context(dp))
			rw_enter(&dp->dp_config_rwlock, RW_READER);
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
		if (!dsl_pool_sync_context(dp))
			rw_exit(&dp->dp_config_rwlock);

		if (err == 0) {
			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT(attr->za_integer_length ==
				    sizeof (uint64_t));
				ASSERT(attr->za_num_integers == 1);

				child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
				(void) strcpy(child, name);
				(void) strcat(child, "@");
				(void) strcat(child, attr->za_name);
				err = func(spa, attr->za_first_integer,
				    child, arg);
				kmem_free(child, MAXPATHLEN);
				if (err)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_close(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	err = func(spa, thisobj, name, arg);
	return (err);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os->os_user_ptr_lock));
	os->os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os->os_user_ptr_lock));
	return (os->os->os_user_ptr);
}