/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zio_checksum.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>


spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}

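/*
 * Open the on-disk objset described by 'bp' on behalf of dataset 'ds'
 * ('ds' is NULL for the meta-objset).  The objset_phys_t is read in
 * through the ARC (or zeroed if the root bp is a hole), checksum and
 * compression property callbacks are registered for non-snapshot
 * datasets, and the result is cached as the dataset's user pointer;
 * if another thread caches its objset first, that winner is returned
 * and the local copy is evicted.
 */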
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_impl_t **osip)
{
	objset_impl_t *winner, *osi;
	int i, err, checksum;

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	if (bp)
		osi->os_rootbp = *bp;
	osi->os_phys = zio_buf_alloc(sizeof (objset_phys_t));
	if (!BP_IS_HOLE(&osi->os_rootbp)) {
		uint32_t aflags = ARC_WAIT;
		zbookmark_t zb;
		zb.zb_objset = ds ? ds->ds_object : 0;
		zb.zb_object = 0;
		zb.zb_level = -1;
		zb.zb_blkid = 0;

		dprintf_bp(&osi->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, &osi->os_rootbp,
		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
		    arc_bcopy_func, osi->os_phys,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err) {
			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else {
		bzero(osi->os_phys, sizeof (objset_phys_t));
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know, and
	 * registering would complicate clone promotion.
	 */
	if (ds && ds->ds_phys->ds_num_children == 0) {
		err = dsl_prop_register(ds, "checksum",
		    checksum_changed_cb, osi);
		if (err == 0)
			err = dsl_prop_register(ds, "compression",
			    compression_changed_cb, osi);
		if (err) {
			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
			kmem_free(osi, sizeof (objset_impl_t));
			return (err);
		}
	} else if (ds == NULL) {
		/* It's the meta-objset. */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
	}

	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);

	/*
	 * Metadata always gets compressed and checksummed.
	 * If the data checksum is multi-bit correctable, and it's not
	 * a ZBT-style checksum, then it's suitable for metadata as well.
	 * Otherwise, the metadata checksum defaults to fletcher4.
	 */
	checksum = osi->os_checksum;

	if (zio_checksum_table[checksum].ci_correctable &&
	    !zio_checksum_table[checksum].ci_zbt)
		osi->os_md_checksum = checksum;
	else
		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
	osi->os_md_compress = ZIO_COMPRESS_LZJB;

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&osi->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&osi->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);

	if (ds != NULL) {
		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
		if (winner) {
			dmu_objset_evict(ds, osi);
			osi = winner;
		}
	}

	*osip = osi;
	return (0);
}

/* called from zpl */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	objset_t *os;
	objset_impl_t *osi;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dsl_dataset_open(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		blkptr_t bp;

		dsl_dataset_get_blkptr(ds, &bp);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &bp, &osi);
		if (err) {
			dsl_dataset_close(ds, mode, os);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	}

	os->os = osi;
	os->os_mode = mode;

	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
		dmu_objset_close(os);
		return (EINVAL);
	}
	*osp = os;
	return (0);
}

void
dmu_objset_close(objset_t *os)
{
	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
	kmem_free(os, sizeof (objset_t));
}

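/*
 * Evict the dbufs of every dnode in this objset.  The meta-dnode is
 * moved to the tail of the dnode list so that it is processed last,
 * since the other dnodes hold references on it.  Returns 1 if
 * dnode_evict_dbufs() fails for some dnode, otherwise 0.
 */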
int
dmu_objset_evict_dbufs(objset_t *os, int try)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && refcount_is_zero(&dn->dn_holds);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;
	if (dn)
		dnode_add_ref(dn, FTAG);

	while (dn) {
		dnode_t *next_dn = dn;

		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && refcount_is_zero(&next_dn->dn_holds));
		if (next_dn)
			dnode_add_ref(next_dn, FTAG);

		mutex_exit(&osi->os_lock);
		if (dnode_evict_dbufs(dn, try)) {
			dnode_rele(dn, FTAG);
			if (next_dn)
				dnode_rele(next_dn, FTAG);
			return (1);
		}
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (0);
}

void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	if (ds && ds->ds_phys->ds_num_children == 0) {
		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi));
		VERIFY(0 == dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi));
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os, 0);

	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
	mutex_destroy(&osi->os_lock);
	mutex_destroy(&osi->os_obj_lock);
	kmem_free(osi, sizeof (objset_impl_t));
}

/* called from dsl for meta-objset */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, dmu_objset_type_t type,
    dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY(0 == dmu_objset_open_impl(spa, ds, NULL, &osi));
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
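		/*
		 * For example (constant values assumed here, not defined in
		 * this file): with dn_nblkptr == 3, 16K meta-dnode blocks
		 * (DNODE_BLOCK_SHIFT == 14), DN_MAX_INDBLKSHIFT == 17 and
		 * SPA_BLKPTRSHIFT == 7, each extra level multiplies the
		 * capacity by 2^10, so covering DN_MAX_OBJECT (2^48)
		 * 512-byte dnodes (2^57 bytes) takes 6 levels.
		 */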
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}

struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_parent;
	const char *lastname;
	dmu_objset_type_t type;
};

/* ARGSUSED */
static int
dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	int err;
	uint64_t ddobj;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (oa->clone_parent != NULL) {
		/*
		 * You can't clone across pools.
		 */
		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);

		/*
		 * You can only clone snapshots, not the head datasets.
		 */
		if (oa->clone_parent->ds_phys->ds_num_children == 0)
			return (EINVAL);
	}
	return (0);
}

static void
dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, tx);

	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds));
	dsl_dataset_get_blkptr(ds, &bp);
	if (BP_IS_HOLE(&bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, tx);
	}
	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type,
    objset_t *clone_parent,
    void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pdd;
	const char *tail;
	int err = 0;
	struct oscarg oa = { 0 };

	ASSERT(strchr(name, '@') == NULL);
	err = dsl_dir_open(name, FTAG, &pdd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		dsl_dir_close(pdd, FTAG);
		return (EEXIST);
	}

	dprintf("name=%s\n", name);

	oa.userfunc = func;
	oa.userarg = arg;
	oa.lastname = tail;
	oa.type = type;
	if (clone_parent != NULL) {
		/*
		 * You can't clone to a different type.
		 */
		if (clone_parent->os->os_phys->os_type != type) {
			dsl_dir_close(pdd, FTAG);
			return (EINVAL);
		}
		oa.clone_parent = clone_parent->os->os_dsl_dataset;
	}
	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
	    dmu_objset_create_sync, pdd, &oa, 5);
	dsl_dir_close(pdd, FTAG);
	return (err);
}

int
dmu_objset_destroy(const char *name)
{
	objset_t *os;
	int error;

	/*
	 * If it looks like we'll be able to destroy it, and there's
	 * an unplayed replay log sitting around, destroy the log.
	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
	 * but the replay log objset is modified in open context.
	 */
	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
	if (error == 0) {
		zil_destroy(dmu_objset_zil(os), B_FALSE);
		dmu_objset_close(os);
	}

	return (dsl_dataset_destroy(name));
}

int
dmu_objset_rollback(const char *name)
{
	int err;
	objset_t *os;

	err = dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT, &os);
	if (err == 0) {
		err = zil_suspend(dmu_objset_zil(os));
		if (err == 0)
			zil_resume(dmu_objset_zil(os));
		if (err == 0) {
			/* XXX uncache everything? */
			err = dsl_dataset_rollback(os->os->os_dsl_dataset);
		}
		dmu_objset_close(os);
	}
	return (err);
}

struct snaparg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char failed[MAXPATHLEN];
};

static int
dmu_objset_snapshot_one(char *name, void *arg)
{
	struct snaparg *sn = arg;
	objset_t *os;
	int err;

	(void) strcpy(sn->failed, name);

	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (err != 0)
		return (err);

	/*
	 * NB: we need to wait for all in-flight changes to get to disk,
	 * so that we snapshot those changes.  zil_suspend does this as
	 * a side effect.
	 */
	err = zil_suspend(dmu_objset_zil(os));
	if (err == 0) {
		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
		    dsl_dataset_snapshot_sync, os, sn->snapname, 3);
	}
	return (err);
}

int
dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
{
	dsl_sync_task_t *dst;
	struct snaparg sn = { 0 };
	char *cp;
	spa_t *spa;
	int err;

	(void) strcpy(sn.failed, fsname);

	cp = strchr(fsname, '/');
	if (cp) {
		*cp = '\0';
		err = spa_open(fsname, &spa, FTAG);
		*cp = '/';
	} else {
		err = spa_open(fsname, &spa, FTAG);
	}
	if (err)
		return (err);

	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	sn.snapname = snapname;

	if (recursive) {
		err = dmu_objset_find(fsname,
		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
	} else {
		err = dmu_objset_snapshot_one(fsname, &sn);
	}

	if (err)
		goto out;

	err = dsl_sync_task_group_wait(sn.dstg);

	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
		objset_t *os = dst->dst_arg1;
		if (dst->dst_err)
			dmu_objset_name(os, sn.failed);
		zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
	}
out:
	if (err)
		(void) strcpy(fsname, sn.failed);
	dsl_sync_task_group_destroy(sn.dstg);
	spa_close(spa, FTAG);
	return (err);
}

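/*
 * Sync every dnode on 'list' for this txg, one level at a time.
 * dnode_sync() returning 0 means the dnode still has dirty blocks at
 * a higher level, so it is put back on the list for the next pass.
 */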
static void
dmu_objset_sync_dnodes(objset_impl_t *os, list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn = list_head(list);
	int level, err;

	for (level = 0; dn = list_head(list); level++) {
		zio_t *zio;
		zio = zio_root(os->os_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);

		ASSERT3U(level, <=, DN_MAX_LEVELS);

		while (dn) {
			dnode_t *next = list_next(list, dn);

			list_remove(list, dn);
			if (dnode_sync(dn, level, zio, tx) == 0) {
				/*
				 * This dnode requires syncing at higher
				 * levels; put it back onto the list.
				 */
				if (next)
					list_insert_before(list, next, dn);
				else
					list_insert_tail(list, dn);
			}
			dn = next;
		}
		err = zio_wait(zio);
		ASSERT(err == 0);
	}
}

/* ARGSUSED */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;
	objset_phys_t *osphys = zio->io_data;
	dnode_phys_t *dnp = &osphys->os_meta_dnode;
	int i;

	ASSERT3U(zio->io_error, ==, 0);

	/*
	 * Update rootbp fill count.
	 */
	os->os_rootbp.blk_fill = 1;	/* count the meta-dnode */
	for (i = 0; i < dnp->dn_nblkptr; i++)
		os->os_rootbp.blk_fill += dnp->dn_blkptr[i].blk_fill;

	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(zio->io_bp, 0);

	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		dsl_dataset_block_kill(os->os_dsl_dataset, &zio->io_bp_orig,
		    os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
		    os->os_synctx);
	}
}


/* called from dsl */
void
dmu_objset_sync(objset_impl_t *os, dmu_tx_t *tx)
{
	extern taskq_t *dbuf_tq;
	int txgoff;
	list_t *dirty_list;
	int err;
	zbookmark_t zb;
	arc_buf_t *abuf =
	    arc_buf_alloc(os->os_spa, sizeof (objset_phys_t), FTAG);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(os->os_synctx == NULL);
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(os, &os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(os, &os->os_dirty_dnodes[txgoff], tx);

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);

	/*
	 * Sync meta-dnode
	 */
	dirty_list = &os->os_dirty_dnodes[txgoff];
	ASSERT(list_head(dirty_list) == NULL);
	list_insert_tail(dirty_list, os->os_meta_dnode);
	dmu_objset_sync_dnodes(os, dirty_list, tx);

	/*
	 * Sync the root block.
	 */
	bcopy(os->os_phys, abuf->b_data, sizeof (objset_phys_t));
	zb.zb_objset = os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	err = arc_write(NULL, os->os_spa, os->os_md_checksum,
	    os->os_md_compress,
	    dmu_get_replication_level(os->os_spa, &zb, DMU_OT_OBJSET),
	    tx->tx_txg, &os->os_rootbp, abuf, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, ARC_WAIT, &zb);
	ASSERT(err == 0);
	VERIFY(arc_buf_remove_ref(abuf, FTAG) == 1);

	dsl_dataset_set_blkptr(os->os_dsl_dataset, &os->os_rootbp, tx);

	ASSERT3P(os->os_synctx, ==, tx);
	taskq_wait(dbuf_tq);
	os->os_synctx = NULL;
}

void
dmu_objset_stats(objset_t *os, dmu_objset_stats_t *dds)
{
	if (os->os->os_dsl_dataset != NULL) {
		dsl_dataset_stats(os->os->os_dsl_dataset, dds);
	} else {
		ASSERT(os->os->os_phys->os_type == DMU_OST_META);
		bzero(dds, sizeof (*dds));
	}
	dds->dds_type = os->os->os_phys->os_type;
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
	else
		return (B_FALSE);
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 */
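/*
 * For example, dmu_objset_snapshot() above uses this to visit a
 * filesystem and everything beneath it:
 *
 *	dmu_objset_find(fsname, dmu_objset_snapshot_one, &sn,
 *	    DS_FIND_CHILDREN);
 */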
int
dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	objset_t *os;
	uint64_t snapobj;
	zap_cursor_t zc;
	zap_attribute_t attr;
	char *child;
	int do_self, err;

	err = dsl_dir_open(name, FTAG, &dd, NULL);
	if (err)
		return (err);

	/* NB: the $MOS dir doesn't have a head dataset */
	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, &attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr.za_integer_length == sizeof (uint64_t));
			ASSERT(attr.za_num_integers == 1);

			/*
			 * No separating '/' because parent's name ends in /.
			 */
			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "/");
			(void) strcat(child, attr.za_name);
			err = dmu_objset_find(child, func, arg, flags);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);

		if (err) {
			dsl_dir_close(dd, FTAG);
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if ((flags & DS_FIND_SNAPSHOTS) &&
	    dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {

		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
		dmu_objset_close(os);

		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
		    zap_cursor_retrieve(&zc, &attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr.za_integer_length == sizeof (uint64_t));
			ASSERT(attr.za_num_integers == 1);

			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "@");
			(void) strcat(child, attr.za_name);
			err = func(child, arg);
			kmem_free(child, MAXPATHLEN);
			if (err)
				break;
		}
		zap_cursor_fini(&zc);
	}

	dsl_dir_close(dd, FTAG);

	if (err)
		return (err);

	/*
	 * Apply to self if appropriate.
	 */
	if (do_self)
		err = func(name, arg);
	return (err);
}