/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/zfeature.h>
#include "zfs_namecheck.h"

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to override the maximum number of threads for the parallelization
 * of dmu_objset_find_dp, needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;
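
/*
 * Illustrative only, not from the original source: on illumos, kernel
 * module tunables such as the one above would typically be set from
 * /etc/system (module prefix and suitability of the value assumed):
 *
 *	set zfs:dmu_find_threads = 16
 */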

/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
 */
int dmu_rescan_dnode_threshold = 131072;

static void dmu_objset_find_dp_cb(void *arg);

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

uint64_t
dmu_objset_dnodesize(objset_t *os)
{
	return (os->os_dnodesize);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa, newval,
	    ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	switch (newval) {
	case ZFS_DNSIZE_LEGACY:
		os->os_dnodesize = DNODE_MIN_SIZE;
		break;
	case ZFS_DNSIZE_AUTO:
		/*
		 * Choose a dnode size that will work well for most
		 * workloads if the user specified "auto". Future code
		 * improvements could dynamically select a dnode size
		 * based on observed workload patterns.
		 */
		os->os_dnodesize = DNODE_MIN_SIZE * 2;
		break;
	case ZFS_DNSIZE_1K:
	case ZFS_DNSIZE_2K:
	case ZFS_DNSIZE_4K:
	case ZFS_DNSIZE_8K:
	case ZFS_DNSIZE_16K:
		os->os_dnodesize = newval;
		break;
	}
}

static void
smallblk_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval <= SPA_OLD_MAXBLOCKSIZE);
	ASSERT(ISP2(newval));

	os->os_zpl_special_smallblock = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}

/*
 * The hash is a CRC-based hash of the objset_t pointer and the object number.
 */
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	/*
	 * The low 6 bits of the pointer don't have much entropy, because
	 * the objset_t is larger than 2^6 bytes long.
	 */
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 24);

	return (crc);
}

unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
	dnode_t *dn = obj;
	return (dnode_hash(dn->dn_objset, dn->dn_object) %
	    multilist_get_num_sublists(ml));
}

/*
 * Instantiates the objset_t in-memory structure corresponding to the
 * objset_phys_t that's pointed to by the specified blkptr_t.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	/*
	 * The $ORIGIN dataset (if it exists) doesn't have an associated
	 * objset, so there's no reason to open it. The $ORIGIN dataset
	 * will not exist on pools older than SPA_VERSION_ORIGIN.
	 */
	if (ds != NULL && spa_get_dsl(spa) != NULL &&
	    spa_get_dsl(spa)->dp_origin_snap != NULL) {
		ASSERT3P(ds->ds_dir, !=,
		    spa_get_dsl(spa)->dp_origin_snap->ds_dir);
	}

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
			    ARC_BUFC_METADATA, sizeof (objset_phys_t));
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
		    ARC_BUFC_METADATA, size);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off). Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		boolean_t needlock = B_FALSE;

		/*
		 * Note: it's valid to open the objset if the dataset is
		 * long-held, in which case the pool_config lock will not
		 * be held.
		 */
		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
			needlock = B_TRUE;
			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		}
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DNODESIZE),
				    dnodesize_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_SPECIAL_SMALL_BLOCKS),
				    smallblk_changed_cb, os);
			}
		}
		if (needlock)
			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (err != 0) {
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
		os->os_dnodesize = DNODE_MIN_SIZE;
	}
	/*
	 * These properties will be filled in by the logic in zfs_get_zplprop()
	 * when they are queried for the first time.
	 */
	os->os_version = OBJSET_PROP_UNINITIALIZED;
	os->os_normalization = OBJSET_PROP_UNINITIALIZED;
	os->os_utf8only = OBJSET_PROP_UNINITIALIZED;
	os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED;

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]),
		    dnode_multilist_index_func);
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
	os->os_obj_next_percpu_len = boot_ncpus;
	os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
	    sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We shouldn't be doing anything with dsl_dataset_t's unless the
	 * pool_config lock is held, or the dataset is long-held.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
	    dsl_dataset_long_held(ds));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
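
/*
 * Illustrative only, not from the original source: a read-only consumer
 * pairs dmu_objset_hold() with dmu_objset_rele(), e.g. (error handling
 * abbreviated, dataset name assumed):
 *
 *	objset_t *os;
 *	int err = dmu_objset_hold("tank/fs", FTAG, &os);
 *	if (err == 0) {
 *		... read-only use of os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 */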

/*
 * Holds the pool while the objset is held. Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}

static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	int err;

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}
	err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
	dsl_pool_rele(dp, FTAG);

	return (err);
}

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own_obj(dp, obj, tag, &ds);
	if (err != 0)
		return (err);

	return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}
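
/*
 * Illustrative only, not from the original source: exclusive consumers
 * pair dmu_objset_own() with dmu_objset_disown(), e.g. (dataset name
 * assumed):
 *
 *	objset_t *os;
 *	if (dmu_objset_own("tank/fs", DMU_OST_ZFS, B_FALSE, tag, &os) == 0) {
 *		... exclusive (read-write) use of os ...
 *		dmu_objset_disown(os, tag);
 *	}
 */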

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag. We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or
 * ownership changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
    void *tag)
{
	dsl_pool_t *dp;
	char name[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = ds->ds_dir->dd_pool;
	dsl_pool_config_enter(dp, FTAG);
	dsl_dataset_disown(ds, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, newds));
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t dn_marker;
	dnode_t *dn;

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds. We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold. If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, &dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, &dn_marker);
			list_remove(&os->os_dnodes, &dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction. Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds)
		dsl_prop_unregister_all(ds, os);

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}

void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use. We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	kmem_free(os->os_obj_next_percpu,
	    os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_userused_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		multilist_destroy(os->os_dirty_dnodes[i]);
	}
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, DNODE_BLOCK_SIZE, DN_MAX_INDBLKSHIFT,
	    DMU_OT_NONE, 0, DNODE_MIN_SLOTS, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes. Note that in order to
		 * ensure that we do not overflow 64 bits, there has to be
		 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
		 * but < 2^64. Therefore,
		 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
		 * less than (64 - log2(DN_MAX_OBJECT)) (16).
		 */
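		/*
		 * Worked example, illustrative only and assuming the usual
		 * illumos constants (DNODE_SHIFT == 9, SPA_BLKPTRSHIFT == 7,
		 * 16K dnode blocks, 128K indirect blocks, dn_nblkptr == 3,
		 * DN_MAX_OBJECT == 2^48): levels == 1 covers 3 << 5 == 96
		 * dnodes, and each additional level multiplies that by 2^10,
		 * so the loop below stops at levels == 6, where 3 << 55
		 * first meets or exceeds 2^48.
		 */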
		while ((uint64_t)mdn->dn_nblkptr <<
		    (mdn->dn_datablkshift - DNODE_SHIFT +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT)
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}

typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	if (dataset_nestcheck(doca->doca_name) != 0)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	dsl_dir_rele(pdd, FTAG);

	return (error);
}

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}

typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;
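
/*
 * Illustrative only, not from the original source: like creation above,
 * cloning is run as a DSL sync task; a caller would do, e.g. (names
 * assumed):
 *
 *	error = dmu_objset_clone("tank/clone", "tank/fs@snap");
 *
 * which runs dmu_objset_clone_check() and, if it passes,
 * dmu_objset_clone_sync() in syncing context.
 */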

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
{
	int error = 0;
	uint64_t object = 0;
	while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
		error = dmu_object_remap_indirects(os, object,
		    last_removed_txg);
		/*
		 * If the ZPL removed the object before we managed to
		 * dnode_hold it, we would get an ENOENT. If the ZPL declares
		 * its intent to remove the object (dnode_free) before we
		 * manage to dnode_hold it, we would get an EEXIST. In either
		 * case, we want to continue remapping the other objects in
		 * the objset; in all other cases, we want to break early.
		 */
		if (error != 0 && error != ENOENT && error != EEXIST) {
			break;
		}
	}
	if (error == ESRCH) {
		error = 0;
	}
	return (error);
}

int
dmu_objset_remap_indirects(const char *fsname)
{
	int error = 0;
	objset_t *os = NULL;
	uint64_t last_removed_txg;
	uint64_t remap_start_txg;
	dsl_dir_t *dd;

	error = dmu_objset_hold(fsname, FTAG, &os);
	if (error != 0) {
		return (error);
	}
	dd = dmu_objset_ds(os)->ds_dir;

	if (!spa_feature_is_enabled(dmu_objset_spa(os),
	    SPA_FEATURE_OBSOLETE_COUNTS)) {
		dmu_objset_rele(os, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
		dmu_objset_rele(os, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * If there has not been a removal, we're done.
	 */
	last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
	if (last_removed_txg == -1ULL) {
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * If we have remapped since the last removal, we're done.
	 */
	if (dsl_dir_is_zapified(dd)) {
		uint64_t last_remap_txg;
		if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
		    dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
		    sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
		    last_remap_txg > last_removed_txg) {
			dmu_objset_rele(os, FTAG);
			return (0);
		}
	}

	dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
	dsl_pool_rele(dmu_objset_pool(os), FTAG);

	remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
	error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
	if (error == 0) {
		/*
		 * We update the last_remap_txg to be the start txg so that
		 * we can guarantee that every block older than last_remap_txg
		 * that can be remapped has been remapped.
		 */
		error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
	}

	dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
	dsl_dataset_rele(dmu_objset_ds(os), FTAG);

	return (error);
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}

static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = multilist_sublist_head(list)) != NULL) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		multilist_sublist_remove(list, dn);

		/*
		 * If we are not doing useraccounting (os_synced_dnodes ==
		 * NULL) we are done with this dnode for this txg. Unset
		 * dn_dirty_txg if later txgs aren't dirtying it so that
		 * future holders do not get a stale value. Otherwise, we
		 * will do this in userquota_updates_task() when processing
		 * has completely finished for this txg.
		 */
		multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
		if (newlist != NULL) {
			(void) dnode_add_ref(dn, newlist);
			multilist_insert(newlist, dn);
		} else {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_dirty_txg == tx->tx_txg)
				dn->dn_dirty_txg = 0;
			mutex_exit(&dn->dn_mtx);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
	if (os->os_dsl_dataset != NULL)
		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
	*os->os_rootbp = *bp;
	if (os->os_dsl_dataset != NULL)
		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
	kmem_free(bp, sizeof (*bp));
}

typedef struct sync_dnodes_arg {
	multilist_t *sda_list;
	int sda_sublist_idx;
	multilist_t *sda_newlist;
	dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;

static void
sync_dnodes_task(void *arg)
{
	sync_dnodes_arg_t *sda = arg;

	multilist_sublist_t *ms =
	    multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);

	dmu_objset_sync_dnodes(ms, sda->sda_tx);

	multilist_sublist_unlock(ms);

	kmem_free(sda, sizeof (*sda));
}

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;
	blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
	*blkptr_copy = *os->os_rootbp;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS. If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
	    os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg. But it may already
		 * exist because we call dsl_dataset_sync() twice per txg.
		 */
		if (os->os_synced_dnodes == NULL) {
			os->os_synced_dnodes =
			    multilist_create(sizeof (dnode_t),
			    offsetof(dnode_t, dn_dirty_link[txgoff]),
			    dnode_multilist_index_func);
		} else {
			ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
			    offsetof(dnode_t, dn_dirty_link[txgoff]));
		}
	}

	for (int i = 0;
	    i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) {
		sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
		sda->sda_list = os->os_dirty_dnodes[txgoff];
		sda->sda_sublist_idx = i;
		sda->sda_tx = tx;
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    sync_dnodes_task, sda, 0);
		/* callback frees sda */
	}
	taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list)) != NULL) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}

	/* Enable dnode backfill if enough objects have been freed. */
	if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
		os->os_rescan_dnodes = B_TRUE;
		os->os_freed_dnodes = 0;
	}

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

typedef struct userquota_node {
	uint64_t uqn_id;
	int64_t uqn_delta;
	avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
	avl_tree_t uqc_user_deltas;
	avl_tree_t uqc_group_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
	const userquota_node_t *luqn = l;
	const userquota_node_t *ruqn = r;

	if (luqn->uqn_id < ruqn->uqn_id)
		return (-1);
	if (luqn->uqn_id > ruqn->uqn_id)
		return (1);
	return (0);
}

static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
	void *cookie;
	userquota_node_t *uqn;

	ASSERT(dmu_tx_is_syncing(tx));

	cookie = NULL;
	while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
	    &cookie)) != NULL) {
		/*
		 * os_userused_lock protects against concurrent calls to
		 * zap_increment_int(). It's needed because zap_increment_int()
		 * is not thread-safe (i.e. not atomic).
		 */
		mutex_enter(&os->os_userused_lock);
		VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT,
		    uqn->uqn_id, uqn->uqn_delta, tx));
		mutex_exit(&os->os_userused_lock);
		kmem_free(uqn, sizeof (*uqn));
	}
	avl_destroy(&cache->uqc_user_deltas);

	cookie = NULL;
	while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
	    &cookie)) != NULL) {
		mutex_enter(&os->os_userused_lock);
		VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    uqn->uqn_id, uqn->uqn_delta, tx));
		mutex_exit(&os->os_userused_lock);
		kmem_free(uqn, sizeof (*uqn));
	}
	avl_destroy(&cache->uqc_group_deltas);
}

static void
userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
{
	userquota_node_t search = { .uqn_id = id };
	avl_index_t idx;

	userquota_node_t *uqn = avl_find(avl, &search, &idx);
	if (uqn == NULL) {
		uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
		uqn->uqn_id = id;
		avl_insert(avl, uqn, idx);
	}
	uqn->uqn_delta += delta;
}

static void
do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_MIN_SIZE + used;
		if (subtract)
			delta = -delta;

		userquota_update_cache(&cache->uqc_user_deltas, user, delta);
		userquota_update_cache(&cache->uqc_group_deltas, group, delta);
	}
}

typedef struct userquota_updates_arg {
	objset_t *uua_os;
	int uua_sublist_idx;
	dmu_tx_t *uua_tx;
} userquota_updates_arg_t;

static void
userquota_updates_task(void *arg)
{
	userquota_updates_arg_t *uua = arg;
	objset_t *os = uua->uua_os;
	dmu_tx_t *tx = uua->uua_tx;
	dnode_t *dn;
	userquota_cache_t cache = { 0 };

	multilist_sublist_t *list =
	    multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);

	ASSERT(multilist_sublist_head(list) == NULL ||
	    dmu_objset_userused_enabled(os));
	avl_create(&cache.uqc_user_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
	avl_create(&cache.uqc_group_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));

	while ((dn = multilist_sublist_head(list)) != NULL) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(&cache,
			    dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(&cache,
			    DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		if (dn->dn_dirty_txg == spa_syncing_txg(os->os_spa))
			dn->dn_dirty_txg = 0;
		mutex_exit(&dn->dn_mtx);

		multilist_sublist_remove(list, dn);
		dnode_rele(dn, os->os_synced_dnodes);
	}
	do_userquota_cacheflush(os, &cache, tx);
	multilist_sublist_unlock(list);
	kmem_free(uua, sizeof (*uua));
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	if (!dmu_objset_userused_enabled(os))
		return;

	/* Allocate the user/groupused objects if necessary. */
	if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
		VERIFY0(zap_create_claim(os,
		    DMU_USERUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		VERIFY0(zap_create_claim(os,
		    DMU_GROUPUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
	}

	for (int i = 0;
	    i < multilist_get_num_sublists(os->os_synced_dnodes); i++) {
		userquota_updates_arg_t *uua =
		    kmem_alloc(sizeof (*uua), KM_SLEEP);
		uua->uua_os = os;
		uua->uua_sublist_idx = i;
		uua->uua_tx = tx;
		/* note: caller does taskq_wait() */
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    userquota_updates_task, uua, 0);
		/* callback frees uua */
	}
}
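
/*
 * Illustrative only, not from the original source: each
 * userquota_updates_task() above accumulates signed per-id deltas in
 * its AVL caches, conceptually
 *
 *	userquota_update_cache(&cache.uqc_user_deltas, uid, delta);
 *
 * so that do_userquota_cacheflush() can apply one net
 * zap_increment_int() per uid/gid rather than one ZAP update per dnode.
 */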

/*
 * Returns a pointer to data to find uid/gid from.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found then NULL is returned. In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}

void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and thus accounted. If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}
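
/*
 * Illustrative only, not from the original source: the listing routines
 * below are cursor-style iterators; callers feed the serialized offset
 * back in (with the pool config lock held, per the ASSERT in
 * dmu_snapshot_list_next()), e.g.:
 *
 *	uint64_t off = 0;
 *	char snapname[ZFS_MAX_DATASET_NAME_LEN];
 *	while (dmu_snapshot_list_next(os, sizeof (snapname), snapname,
 *	    NULL, &off, NULL) == 0) {
 *		... visit snapname ...
 *	}
 */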

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_NORMALIZE, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

typedef struct dmu_objset_find_ctx {
	taskq_t *dc_tq;
	dsl_pool_t *dc_dp;
	uint64_t dc_ddobj;
	char *dc_ddname; /* last component of ddobj's name */
	int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
	void *dc_arg;
	int dc_flags;
	kmutex_t *dc_error_lock;
	int *dc_error;
} dmu_objset_find_ctx_t;

static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	/*
	 * Note: passing the name (dc_ddname) here is optional, but it
	 * improves performance because we don't need to call
	 * zap_value_search() to determine the name.
	 */
static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	/*
	 * Note: passing the name (dc_ddname) here is optional, but it
	 * improves performance because we don't need to call
	 * zap_value_search() to determine the name.
	 */
	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
	if (err != 0)
		goto out;

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (dcp->dc_flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			dmu_objset_find_ctx_t *child_dcp =
			    kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
			*child_dcp = *dcp;
			child_dcp->dc_ddobj = attr->za_first_integer;
			child_dcp->dc_ddname = spa_strdup(attr->za_name);
			if (dcp->dc_tq != NULL)
				(void) taskq_dispatch(dcp->dc_tq,
				    dmu_objset_find_dp_cb, child_dcp,
				    TQ_SLEEP);
			else
				dmu_objset_find_dp_impl(child_dcp);
		}
		zap_cursor_fini(&zc);
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = dcp->dc_func(dp, ds, dcp->dc_arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0) {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

	/*
	 * Note: we hold the dir while calling dsl_dataset_hold_obj() so
	 * that the dir will remain cached, and we won't have to re-instantiate
	 * it (which could be expensive due to finding its name via
	 * zap_value_search()).
	 */
	dsl_dir_rele(dd, FTAG);
	if (err != 0)
		goto out;
	err = dcp->dc_func(dp, ds, dcp->dc_arg);
	dsl_dataset_rele(ds, FTAG);

out:
	if (err != 0) {
		mutex_enter(dcp->dc_error_lock);
		/* only keep first error */
		if (*dcp->dc_error == 0)
			*dcp->dc_error = err;
		mutex_exit(dcp->dc_error_lock);
	}

	if (dcp->dc_ddname != NULL)
		spa_strfree(dcp->dc_ddname);
	kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * assert(pool_config_held) down the stack.  Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer.  This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	int error = 0;
	taskq_t *tq = NULL;
	int ntasks;
	dmu_objset_find_ctx_t *dcp;
	kmutex_t err_lock;

	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
	dcp->dc_tq = NULL;
	dcp->dc_dp = dp;
	dcp->dc_ddobj = ddobj;
	dcp->dc_ddname = NULL;
	dcp->dc_func = func;
	dcp->dc_arg = arg;
	dcp->dc_flags = flags;
	dcp->dc_error_lock = &err_lock;
	dcp->dc_error = &error;

	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * In case a write lock is held we can't make use of
		 * parallelism, as down the stack of the worker threads
		 * the lock is asserted via dsl_pool_config_held.
		 * In case of a read lock this is solved by getting a read
		 * lock in each worker thread, which isn't possible in case
		 * of a writer lock.  So we fall back to the synchronous
		 * path here.
		 * In the future it might be possible to get some magic into
		 * dsl_pool_config_held in a way that it returns true for
		 * the worker threads so that a single lock held from this
		 * thread suffices.  For now, stay single threaded.
		 */
		dmu_objset_find_dp_impl(dcp);
		mutex_destroy(&err_lock);

		return (error);
	}

	ntasks = dmu_find_threads;
	if (ntasks == 0)
		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
	tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
	    INT_MAX, 0);
	if (tq == NULL) {
		kmem_free(dcp, sizeof (*dcp));
		mutex_destroy(&err_lock);

		return (SET_ERROR(ENOMEM));
	}
	dcp->dc_tq = tq;

	/* dcp will be freed by task */
	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active.  As
	 * we always queue new tasks from within other tasks, taskq_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
	taskq_wait(tq);
	taskq_destroy(tq);
	mutex_destroy(&err_lock);

	return (error);
}
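/*
 * Illustrative sketch, not part of the original file: a callback suitable
 * for dmu_objset_find_dp() above.  It is invoked with the pool config lock
 * held and, unless DS_FIND_SERIALIZE is set, may run concurrently on taskq
 * threads, so any shared state needs its own lock.  The example_count_*
 * names are hypothetical.
 */
typedef struct example_count_arg {
	kmutex_t	eca_lock;
	uint64_t	eca_count;
} example_count_arg_t;

static int
example_count_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	example_count_arg_t *eca = arg;

	mutex_enter(&eca->eca_lock);
	eca->eca_count++;
	mutex_exit(&eca->eca_lock);

	/* a non-zero return is kept as the first error and ends the walk */
	return (0);
}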
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}
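/*
 * Illustrative sketch, not part of the original file: a name-based callback
 * for dmu_objset_find() above.  Unlike the _dp variant, it receives dataset
 * names and runs without dp_config_rwlock held.  example_name_cb and the
 * nvlist argument are hypothetical; a walk over pool "tank" would look like:
 *
 *	(void) dmu_objset_find("tank", example_name_cb, nvl,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */
static int
example_name_cb(const char *name, void *arg)
{
	nvlist_t *nvl = arg;

	/* record each visited dataset name; 0 continues the enumeration */
	return (nvlist_add_boolean(nvl, name));
}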
/*
 * Determine the name of the filesystem, given the name of a snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes.
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}

/*
 * Call when we think we're going to write/free space in open context to
 * track the amount of dirty data in the open txg, which is also the amount
 * of memory that cannot be evicted until this txg syncs.
 */
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);

	if (ds != NULL) {
		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
		dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
	}
}
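/*
 * Illustrative sketch, not part of the original file: recovering the
 * filesystem component from a snapshot name with dmu_fsname() above.
 * The snapshot name "tank/home@monday" and example_fsname() are
 * hypothetical.
 */
static int
example_fsname(void)
{
	char buf[ZFS_MAX_DATASET_NAME_LEN];
	int err;

	err = dmu_fsname("tank/home@monday", buf);
	/* on success, buf now holds "tank/home" */
	return (err);
}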