/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zio_checksum.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}
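/*
 * Property-change callbacks.  These are registered per-dataset in
 * dmu_objset_open_impl() via dsl_prop_register(); the DSL calls them
 * with the already-resolved property value, so "inherit" never reaches
 * them (hence the ASSERTs below).
 */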
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(osi->os_spa));

	osi->os_copies = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}

int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_impl_t **osip)
{
	objset_impl_t *osi;
	int i, err, checksum;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	osi->os_rootbp = bp;
	if (!BP_IS_HOLE(osi->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		zb.zb_objset = ds ? ds->ds_object : 0;
		zb.zb_object = 0;
		zb.zb_level = -1;
		zb.zb_blkid = 0;

		dprintf_bp(osi->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, osi->os_rootbp,
		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
		    arc_getbuf_func, &osi->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
		osi->os_phys = osi->os_phys_buf->b_data;
		if (ds == NULL || dsl_dataset_is_snapshot(ds) == 0)
			arc_release(osi->os_phys_buf, &osi->os_phys_buf);
	} else {
		osi->os_phys_buf = arc_buf_alloc(spa, sizeof (objset_phys_t),
		    &osi->os_phys_buf, ARC_BUFC_METADATA);
		osi->os_phys = osi->os_phys_buf->b_data;
		bzero(osi->os_phys, sizeof (objset_phys_t));
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know, and
	 * registering would complicate clone promotion.
	 */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		err = dsl_prop_register(ds, "checksum",
		    checksum_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "compression",
			    compression_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "copies",
			    copies_changed_cb, osi);
		if (err) {
			VERIFY(arc_buf_remove_ref(osi->os_phys_buf,
			    &osi->os_phys_buf) == 1);
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
		osi->os_copies = spa_max_replication(spa);
	}

	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);

	/*
	 * Metadata always gets compressed and checksummed.
	 * If the data checksum is multi-bit correctable, and it's not
	 * a ZBT-style checksum, then it's suitable for metadata as well.
	 * Otherwise, the metadata checksum defaults to fletcher4.
	 */
	checksum = osi->os_checksum;

	if (zio_checksum_table[checksum].ci_correctable &&
	    !zio_checksum_table[checksum].ci_zbt)
		osi->os_md_checksum = checksum;
	else
		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
	osi->os_md_compress = ZIO_COMPRESS_LZJB;

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);

	/*
	 * We should be the only thread trying to do this because we
	 * have ds_opening_lock
	 */
	if (ds) {
		VERIFY(NULL == dsl_dataset_set_user_ptr(ds, osi,
		    dmu_objset_evict));
	}

	*osip = osi;
	return (0);
}

static int
dmu_objset_open_ds_os(dsl_dataset_t *ds, objset_t *os, dmu_objset_type_t type)
{
	objset_impl_t *osi;

	mutex_enter(&ds->ds_opening_lock);
	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		int err;

		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, &osi);
		if (err) {
			mutex_exit(&ds->ds_opening_lock);
			return (err);
		}
	}
	mutex_exit(&ds->ds_opening_lock);

	os->os = osi;
	os->os_mode = DS_MODE_NOHOLD;

	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type)
		return (EINVAL);
	return (0);
}

int
dmu_objset_open_ds(dsl_dataset_t *ds, dmu_objset_type_t type, objset_t **osp)
{
	objset_t *os;
	int err;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dmu_objset_open_ds_os(ds, os, type);
	if (err)
		kmem_free(os, sizeof (objset_t));
	else
		*osp = os;
	return (err);
}

/* called from zpl */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	objset_t *os;
	dsl_dataset_t *ds;
	int err;

	ASSERT(DS_MODE_TYPE(mode) == DS_MODE_USER ||
	    DS_MODE_TYPE(mode) == DS_MODE_OWNER);

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	if (DS_MODE_TYPE(mode) == DS_MODE_USER)
		err = dsl_dataset_hold(name, os, &ds);
	else
		err = dsl_dataset_own(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	err = dmu_objset_open_ds_os(ds, os, type);
	if (err) {
		if (DS_MODE_TYPE(mode) == DS_MODE_USER)
			dsl_dataset_rele(ds, os);
		else
			dsl_dataset_disown(ds, os);
		kmem_free(os, sizeof (objset_t));
	} else {
		os->os_mode = mode;
		*osp = os;
	}
	return (err);
}
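/*
 * dmu_objset_open() takes either a hold (DS_MODE_USER) or exclusive
 * ownership (DS_MODE_OWNER) on the underlying dataset and records the
 * mode in os_mode.  dmu_objset_close() below releases or disowns
 * accordingly, so the two calls must be paired with the same mode.
 */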
void
dmu_objset_close(objset_t *os)
{
	ASSERT(DS_MODE_TYPE(os->os_mode) == DS_MODE_USER ||
	    DS_MODE_TYPE(os->os_mode) == DS_MODE_OWNER ||
	    DS_MODE_TYPE(os->os_mode) == DS_MODE_NOHOLD);

	if (DS_MODE_TYPE(os->os_mode) == DS_MODE_USER)
		dsl_dataset_rele(os->os->os_dsl_dataset, os);
	else if (DS_MODE_TYPE(os->os_mode) == DS_MODE_OWNER)
		dsl_dataset_disown(os->os->os_dsl_dataset, os);
	kmem_free(os, sizeof (objset_t));
}

int
dmu_objset_evict_dbufs(objset_t *os)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds on a dnode, then it has no dbufs,
	 * so it's OK to skip it.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && !dnode_add_ref(dn, FTAG);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && !dnode_add_ref(next_dn, FTAG));

		mutex_exit(&osi->os_lock);
		dnode_evict_dbufs(dn);
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (list_head(&osi->os_dnodes) != osi->os_meta_dnode);
}

void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	if (ds && ds->ds_phys && ds->ds_phys->ds_num_children == 0) {
		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "copies",
		    copies_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os);

	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	VERIFY(arc_buf_remove_ref(osi->os_phys_buf, &osi->os_phys_buf) == 1);
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	mutex_destroy(&osi->os_user_ptr_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}
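/*
 * dmu_objset_create_impl() below sizes the new objset's meta-dnode up
 * front: for a regular dataset it adds indirect levels until
 * nblkptr * datablksize * (blkptrs-per-indirect)^(levels - 1) covers
 * DN_MAX_OBJECT * sizeof (dnode_phys_t) bytes of dnode storage, so
 * dn_nlevels never has to grow later from open context.
 */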
/* called from dsl for meta-objset */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	if (ds)
		mutex_enter(&ds->ds_opening_lock);
	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &osi));
	if (ds)
		mutex_exit(&ds->ds_opening_lock);
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}

struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_parent;
	const char *lastname;
	dmu_objset_type_t type;
	uint64_t flags;
};

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_parent != NULL) {
		/*
		 * You can't clone across pools.
		 */
		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/*
		 * You can only clone snapshots, not the head datasets.
		 */
		if (oa->clone_parent->ds_phys->ds_num_children == 0)
			return (EINVAL);
	}

	return (0);
}

static void
dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t *bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, oa->flags, cr, tx);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	if (BP_IS_HOLE(bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, cr, tx);
	}

	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dsobj);

	dsl_dataset_rele(ds, FTAG);
}
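/*
 * dmu_objset_create() runs the two functions above as a DSL sync task:
 * dmu_objset_create_check() validates the request (name not already in
 * the parent's child-dir ZAP, clone source in the same pool and actually
 * a snapshot), and dmu_objset_create_sync() creates the dataset and, for
 * a non-clone, the empty objset in syncing context.
 */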
int
dmu_objset_create(const char *name, dmu_objset_type_t type,
    objset_t *clone_parent, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	dprintf("name=%s\n", name);

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	oa.flags = flags;

	if (clone_parent != NULL) {
		/*
		 * You can't clone to a different type.
		 */
		if (clone_parent->os->os_phys->os_type != type) {
			dsl_dir_close(pdd, FTAG);
			return (EINVAL);
		}
		oa.clone_parent = clone_parent->os->os_dsl_dataset;
	}
	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}

int
dmu_objset_destroy(const char *name)
{
	objset_t *os;
	int error;

	/*
	 * If it looks like we'll be able to destroy it, and there's
	 * an unplayed replay log sitting around, destroy the log.
	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
	 * but the replay log objset is modified in open context.
	 */
	error = dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_OWNER|DS_MODE_READONLY|DS_MODE_INCONSISTENT, &os);
	if (error == 0) {
		dsl_dataset_t *ds = os->os->os_dsl_dataset;
		zil_destroy(dmu_objset_zil(os), B_FALSE);

		error = dsl_dataset_destroy(ds, os);
		/*
		 * dsl_dataset_destroy() closes the ds.
		 */
		kmem_free(os, sizeof (objset_t));
	}

	return (error);
}

/*
 * This will close the objset.
 */
int
dmu_objset_rollback(objset_t *os)
{
	int err;
	dsl_dataset_t *ds;

	ds = os->os->os_dsl_dataset;

	if (!dsl_dataset_tryown(ds, TRUE, os)) {
		dmu_objset_close(os);
		return (EBUSY);
	}

	err = dsl_dataset_rollback(ds, os->os->os_phys->os_type);

	/*
	 * NB: we close the objset manually because the rollback
	 * actually implicitly called dmu_objset_evict(), thus freeing
	 * the objset_impl_t.
	 */
	dsl_dataset_disown(ds, os);
	kmem_free(os, sizeof (objset_t));
	return (err);
}

struct snaparg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char failed[MAXPATHLEN];
	boolean_t checkperms;
	list_t objsets;
};

struct osnode {
	list_node_t node;
	objset_t *os;
};
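/*
 * Per-dataset step of (possibly recursive) snapshot creation: check
 * permissions when requested, take a user hold on the objset, refuse
 * datasets that are still inconsistent, suspend the ZIL so in-flight
 * changes reach disk, and queue a snapshot sync task.  The ZIL is
 * resumed and the objset closed in dmu_objset_snapshot() once the task
 * group has completed.
 */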
static int
dmu_objset_snapshot_one(char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	int err;

	(void) strcpy(sn->failed, name);

	/*
	 * Check permissions only when requested.  This only applies when
	 * doing a recursive snapshot.  The permission checks for the
	 * starting dataset have already been performed in
	 * zfs_secpolicy_snapshot().
	 */
	if (sn->checkperms == B_TRUE &&
	    (err = zfs_secpolicy_snapshot_perms(name, CRED())))
		return (err);

	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_USER, &os);
	if (err != 0)
		return (err);

	/* If the objset is in an inconsistent state, return busy */
	if (os->os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
		dmu_objset_close(os);
		return (EBUSY);
	}

	/*
	 * NB: we need to wait for all in-flight changes to get to disk,
	 * so that we snapshot those changes.  zil_suspend does this as
	 * a side effect.
	 */
	err = zil_suspend(dmu_objset_zil(os));
	if (err == 0) {
		struct osnode *osn;
		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
		    dsl_dataset_snapshot_sync, os->os->os_dsl_dataset,
		    sn->snapname, 3);
		osn = kmem_alloc(sizeof (struct osnode), KM_SLEEP);
		osn->os = os;
		list_insert_tail(&sn->objsets, osn);
	} else {
		dmu_objset_close(os);
	}

	return (err);
}

int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct osnode *osn;
	struct snaparg sn = { 0 };
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;
	list_create(&sn.objsets, sizeof (struct osnode),
	    offsetof(struct osnode, node));

	if (recursive) {
		sn.checkperms = B_TRUE;
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		sn.checkperms = B_FALSE;
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err)
			dsl_dataset_name(ds, sn.failed);
	}

out:
	while (osn = list_head(&sn.objsets)) {
		list_remove(&sn.objsets, osn);
		zil_resume(dmu_objset_zil(osn->os));
		dmu_objset_close(osn->os);
		kmem_free(osn, sizeof (struct osnode));
	}
	list_destroy(&sn.objsets);

	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}

static void
dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn;

	while (dn = list_head(list)) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync()
		 * to accommodate meta-dnode
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);
		dnode_sync(dn, tx);
	}
}
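/*
 * arc_write() ready callback for the objset's root block: recompute the
 * fill count (the meta-dnode itself plus the fill counts of its block
 * pointers), stamp the type and level, and, once the identity is known,
 * update the dataset's block accounting if the block moved.
 */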
/* ARGSUSED */
static void
ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;
	blkptr_t *bp = os->os_rootbp;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
	int i;

	ASSERT(bp == zio->io_bp);

	/*
	 * Update rootbp fill count.
	 */
	bp->blk_fill = 1;	/* count the meta-dnode */
	for (i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;

	BP_SET_TYPE(bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(bp, 0);

	/* We must do this after we've set the bp's type and level */
	if (!DVA_EQUAL(BP_IDENTITY(bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg)
			dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio->io_bp_orig, NULL, os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, bp, os->os_synctx);
	}
}

/* ARGSUSED */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;

	ASSERT3U(zio->io_error, ==, 0);
	arc_release(os->os_phys_buf, &os->os_phys_buf);
}
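/*
 * Sync pipeline for one objset in one txg: issue the arc_write() for the
 * root block (with ready() and killer() above as callbacks), sync the
 * meta-dnode with that zio as parent, then the freed and dirty dnode
 * lists for this txg, then the meta-dnode's level-0 dirty records, and
 * finally let zil_sync() free intent-log blocks covered by this txg.
 */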
/* called from dsl */
void
dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_t zb;
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg)) {
		dsl_dataset_block_kill(os->os_dsl_dataset,
		    os->os_rootbp, pio, tx);
	}
	zio = arc_write(pio, os->os_spa, os->os_md_checksum,
	    os->os_md_compress,
	    dmu_get_replication_level(os, &zb, DMU_OT_OBJSET),
	    tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_METADATA,
	    &zb);

	/*
	 * Sync meta-dnode - the parent IO for the sync is the root block
	 */
	os->os_meta_dnode->dn_zio = zio;
	dnode_sync(os->os_meta_dnode, tx);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx);

	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
	while (dr = list_head(list)) {
		ASSERT(dr->dr_dbuf->db_level == 0);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	zio_nowait(zio);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os->os_phys->os_type;
	if (os->os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os->os_dsl_dataset ||
	    os->os->os_phys->os_type == DMU_OST_META);

	if (os->os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os->os_phys->os_type);
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
	else
		return (B_FALSE);
}

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	uint64_t ignored;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
	    real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
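/*
 * Like dmu_snapshot_list_next() above, this returns one ZAP entry per
 * call: *offp carries a serialized cursor, so callers can resume the
 * walk across calls without holding any state of their own.
 */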
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 */
int
dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	objset_t *os;
	uint64_t snapobj;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	int do_self, err;

	err = dsl_dir_open(name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* NB: the $MOS dir doesn't have a head dataset */
	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr->za_integer_length == sizeof (uint64_t));
			ASSERT(attr->za_num_integers == 1);

			/*
			 * No separating '/' because parent's name ends in /.
			 */
			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "/");
			(void) strcat(child, attr->za_name);
			err = dmu_objset_find(child, func, arg, flags);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if ((flags & DS_FIND_SNAPSHOTS) &&
	    dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_USER | DS_MODE_READONLY, &os) == 0) {

		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
		dmu_objset_close(os);

		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr->za_integer_length == sizeof (uint64_t));
			ASSERT(attr->za_num_integers == 1);

			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "@");
			(void) strcat(child, attr->za_name);
			err = func(child, arg);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);
	}

	dsl_dir_close(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	if (do_self)
		err = func(name, arg);
	return (err);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os->os_user_ptr_lock));
	os->os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os->os_user_ptr_lock));
	return (os->os->os_user_ptr);
}