/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/zfeature.h>
#include "zfs_namecheck.h"

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

/*
 * Tunable to override the maximum number of threads for the parallelization
 * of dmu_objset_find_dp, needed to speed up the import of pools with many
 * datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;

/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
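 * (dmu_objset_sync() below compares os_freed_dnodes against this
 * threshold and, once it is reached, sets os_rescan_dnodes so that
 * object allocation can start scanning for holes again.)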
77 */ 78 int dmu_rescan_dnode_threshold = 131072; 79 80 static void dmu_objset_find_dp_cb(void *arg); 81 82 void 83 dmu_objset_init(void) 84 { 85 rw_init(&os_lock, NULL, RW_DEFAULT, NULL); 86 } 87 88 void 89 dmu_objset_fini(void) 90 { 91 rw_destroy(&os_lock); 92 } 93 94 spa_t * 95 dmu_objset_spa(objset_t *os) 96 { 97 return (os->os_spa); 98 } 99 100 zilog_t * 101 dmu_objset_zil(objset_t *os) 102 { 103 return (os->os_zil); 104 } 105 106 dsl_pool_t * 107 dmu_objset_pool(objset_t *os) 108 { 109 dsl_dataset_t *ds; 110 111 if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir) 112 return (ds->ds_dir->dd_pool); 113 else 114 return (spa_get_dsl(os->os_spa)); 115 } 116 117 dsl_dataset_t * 118 dmu_objset_ds(objset_t *os) 119 { 120 return (os->os_dsl_dataset); 121 } 122 123 dmu_objset_type_t 124 dmu_objset_type(objset_t *os) 125 { 126 return (os->os_phys->os_type); 127 } 128 129 void 130 dmu_objset_name(objset_t *os, char *buf) 131 { 132 dsl_dataset_name(os->os_dsl_dataset, buf); 133 } 134 135 uint64_t 136 dmu_objset_id(objset_t *os) 137 { 138 dsl_dataset_t *ds = os->os_dsl_dataset; 139 140 return (ds ? ds->ds_object : 0); 141 } 142 143 zfs_sync_type_t 144 dmu_objset_syncprop(objset_t *os) 145 { 146 return (os->os_sync); 147 } 148 149 zfs_logbias_op_t 150 dmu_objset_logbias(objset_t *os) 151 { 152 return (os->os_logbias); 153 } 154 155 static void 156 checksum_changed_cb(void *arg, uint64_t newval) 157 { 158 objset_t *os = arg; 159 160 /* 161 * Inheritance should have been done by now. 162 */ 163 ASSERT(newval != ZIO_CHECKSUM_INHERIT); 164 165 os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE); 166 } 167 168 static void 169 compression_changed_cb(void *arg, uint64_t newval) 170 { 171 objset_t *os = arg; 172 173 /* 174 * Inheritance and range checking should have been done by now. 175 */ 176 ASSERT(newval != ZIO_COMPRESS_INHERIT); 177 178 os->os_compress = zio_compress_select(os->os_spa, newval, 179 ZIO_COMPRESS_ON); 180 } 181 182 static void 183 copies_changed_cb(void *arg, uint64_t newval) 184 { 185 objset_t *os = arg; 186 187 /* 188 * Inheritance and range checking should have been done by now. 189 */ 190 ASSERT(newval > 0); 191 ASSERT(newval <= spa_max_replication(os->os_spa)); 192 193 os->os_copies = newval; 194 } 195 196 static void 197 dedup_changed_cb(void *arg, uint64_t newval) 198 { 199 objset_t *os = arg; 200 spa_t *spa = os->os_spa; 201 enum zio_checksum checksum; 202 203 /* 204 * Inheritance should have been done by now. 205 */ 206 ASSERT(newval != ZIO_CHECKSUM_INHERIT); 207 208 checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF); 209 210 os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK; 211 os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY); 212 } 213 214 static void 215 primary_cache_changed_cb(void *arg, uint64_t newval) 216 { 217 objset_t *os = arg; 218 219 /* 220 * Inheritance and range checking should have been done by now. 221 */ 222 ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || 223 newval == ZFS_CACHE_METADATA); 224 225 os->os_primary_cache = newval; 226 } 227 228 static void 229 secondary_cache_changed_cb(void *arg, uint64_t newval) 230 { 231 objset_t *os = arg; 232 233 /* 234 * Inheritance and range checking should have been done by now. 
235 */ 236 ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || 237 newval == ZFS_CACHE_METADATA); 238 239 os->os_secondary_cache = newval; 240 } 241 242 static void 243 sync_changed_cb(void *arg, uint64_t newval) 244 { 245 objset_t *os = arg; 246 247 /* 248 * Inheritance and range checking should have been done by now. 249 */ 250 ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS || 251 newval == ZFS_SYNC_DISABLED); 252 253 os->os_sync = newval; 254 if (os->os_zil) 255 zil_set_sync(os->os_zil, newval); 256 } 257 258 static void 259 redundant_metadata_changed_cb(void *arg, uint64_t newval) 260 { 261 objset_t *os = arg; 262 263 /* 264 * Inheritance and range checking should have been done by now. 265 */ 266 ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL || 267 newval == ZFS_REDUNDANT_METADATA_MOST); 268 269 os->os_redundant_metadata = newval; 270 } 271 272 static void 273 logbias_changed_cb(void *arg, uint64_t newval) 274 { 275 objset_t *os = arg; 276 277 ASSERT(newval == ZFS_LOGBIAS_LATENCY || 278 newval == ZFS_LOGBIAS_THROUGHPUT); 279 os->os_logbias = newval; 280 if (os->os_zil) 281 zil_set_logbias(os->os_zil, newval); 282 } 283 284 static void 285 recordsize_changed_cb(void *arg, uint64_t newval) 286 { 287 objset_t *os = arg; 288 289 os->os_recordsize = newval; 290 } 291 292 void 293 dmu_objset_byteswap(void *buf, size_t size) 294 { 295 objset_phys_t *osp = buf; 296 297 ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t)); 298 dnode_byteswap(&osp->os_meta_dnode); 299 byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t)); 300 osp->os_type = BSWAP_64(osp->os_type); 301 osp->os_flags = BSWAP_64(osp->os_flags); 302 if (size == sizeof (objset_phys_t)) { 303 dnode_byteswap(&osp->os_userused_dnode); 304 dnode_byteswap(&osp->os_groupused_dnode); 305 } 306 } 307 308 /* 309 * The hash is a CRC-based hash of the objset_t pointer and the object number. 310 */ 311 static uint64_t 312 dnode_hash(const objset_t *os, uint64_t obj) 313 { 314 uintptr_t osv = (uintptr_t)os; 315 uint64_t crc = -1ULL; 316 317 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 318 /* 319 * The low 6 bits of the pointer don't have much entropy, because 320 * the objset_t is larger than 2^6 bytes long. 321 */ 322 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; 323 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; 324 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; 325 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF]; 326 327 crc ^= (osv>>14) ^ (obj>>24); 328 329 return (crc); 330 } 331 332 unsigned int 333 dnode_multilist_index_func(multilist_t *ml, void *obj) 334 { 335 dnode_t *dn = obj; 336 return (dnode_hash(dn->dn_objset, dn->dn_object) % 337 multilist_get_num_sublists(ml)); 338 } 339 340 /* 341 * Instantiates the objset_t in-memory structure corresponding to the 342 * objset_phys_t that's pointed to by the specified blkptr_t. 343 */ 344 int 345 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, 346 objset_t **osp) 347 { 348 objset_t *os; 349 int i, err; 350 351 ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock)); 352 353 /* 354 * The $ORIGIN dataset (if it exists) doesn't have an associated 355 * objset, so there's no reason to open it. The $ORIGIN dataset 356 * will not exist on pools older than SPA_VERSION_ORIGIN. 
357 */ 358 if (ds != NULL && spa_get_dsl(spa) != NULL && 359 spa_get_dsl(spa)->dp_origin_snap != NULL) { 360 ASSERT3P(ds->ds_dir, !=, 361 spa_get_dsl(spa)->dp_origin_snap->ds_dir); 362 } 363 364 os = kmem_zalloc(sizeof (objset_t), KM_SLEEP); 365 os->os_dsl_dataset = ds; 366 os->os_spa = spa; 367 os->os_rootbp = bp; 368 if (!BP_IS_HOLE(os->os_rootbp)) { 369 arc_flags_t aflags = ARC_FLAG_WAIT; 370 zbookmark_phys_t zb; 371 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, 372 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 373 374 if (DMU_OS_IS_L2CACHEABLE(os)) 375 aflags |= ARC_FLAG_L2CACHE; 376 377 dprintf_bp(os->os_rootbp, "reading %s", ""); 378 err = arc_read(NULL, spa, os->os_rootbp, 379 arc_getbuf_func, &os->os_phys_buf, 380 ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb); 381 if (err != 0) { 382 kmem_free(os, sizeof (objset_t)); 383 /* convert checksum errors into IO errors */ 384 if (err == ECKSUM) 385 err = SET_ERROR(EIO); 386 return (err); 387 } 388 389 /* Increase the blocksize if we are permitted. */ 390 if (spa_version(spa) >= SPA_VERSION_USERSPACE && 391 arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) { 392 arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf, 393 ARC_BUFC_METADATA, sizeof (objset_phys_t)); 394 bzero(buf->b_data, sizeof (objset_phys_t)); 395 bcopy(os->os_phys_buf->b_data, buf->b_data, 396 arc_buf_size(os->os_phys_buf)); 397 arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); 398 os->os_phys_buf = buf; 399 } 400 401 os->os_phys = os->os_phys_buf->b_data; 402 os->os_flags = os->os_phys->os_flags; 403 } else { 404 int size = spa_version(spa) >= SPA_VERSION_USERSPACE ? 405 sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE; 406 os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf, 407 ARC_BUFC_METADATA, size); 408 os->os_phys = os->os_phys_buf->b_data; 409 bzero(os->os_phys, size); 410 } 411 412 /* 413 * Note: the changed_cb will be called once before the register 414 * func returns, thus changing the checksum/compression from the 415 * default (fletcher2/off). Snapshots don't need to know about 416 * checksum/compression/copies. 417 */ 418 if (ds != NULL) { 419 boolean_t needlock = B_FALSE; 420 421 /* 422 * Note: it's valid to open the objset if the dataset is 423 * long-held, in which case the pool_config lock will not 424 * be held. 
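 *
 * Each dsl_prop_register() call below runs only if every prior
 * registration succeeded (err == 0), so the first failure
 * short-circuits the remaining registrations.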
425 */ 426 if (!dsl_pool_config_held(dmu_objset_pool(os))) { 427 needlock = B_TRUE; 428 dsl_pool_config_enter(dmu_objset_pool(os), FTAG); 429 } 430 err = dsl_prop_register(ds, 431 zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE), 432 primary_cache_changed_cb, os); 433 if (err == 0) { 434 err = dsl_prop_register(ds, 435 zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE), 436 secondary_cache_changed_cb, os); 437 } 438 if (!ds->ds_is_snapshot) { 439 if (err == 0) { 440 err = dsl_prop_register(ds, 441 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 442 checksum_changed_cb, os); 443 } 444 if (err == 0) { 445 err = dsl_prop_register(ds, 446 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 447 compression_changed_cb, os); 448 } 449 if (err == 0) { 450 err = dsl_prop_register(ds, 451 zfs_prop_to_name(ZFS_PROP_COPIES), 452 copies_changed_cb, os); 453 } 454 if (err == 0) { 455 err = dsl_prop_register(ds, 456 zfs_prop_to_name(ZFS_PROP_DEDUP), 457 dedup_changed_cb, os); 458 } 459 if (err == 0) { 460 err = dsl_prop_register(ds, 461 zfs_prop_to_name(ZFS_PROP_LOGBIAS), 462 logbias_changed_cb, os); 463 } 464 if (err == 0) { 465 err = dsl_prop_register(ds, 466 zfs_prop_to_name(ZFS_PROP_SYNC), 467 sync_changed_cb, os); 468 } 469 if (err == 0) { 470 err = dsl_prop_register(ds, 471 zfs_prop_to_name( 472 ZFS_PROP_REDUNDANT_METADATA), 473 redundant_metadata_changed_cb, os); 474 } 475 if (err == 0) { 476 err = dsl_prop_register(ds, 477 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), 478 recordsize_changed_cb, os); 479 } 480 } 481 if (needlock) 482 dsl_pool_config_exit(dmu_objset_pool(os), FTAG); 483 if (err != 0) { 484 arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); 485 kmem_free(os, sizeof (objset_t)); 486 return (err); 487 } 488 } else { 489 /* It's the meta-objset. */ 490 os->os_checksum = ZIO_CHECKSUM_FLETCHER_4; 491 os->os_compress = ZIO_COMPRESS_ON; 492 os->os_copies = spa_max_replication(spa); 493 os->os_dedup_checksum = ZIO_CHECKSUM_OFF; 494 os->os_dedup_verify = B_FALSE; 495 os->os_logbias = ZFS_LOGBIAS_LATENCY; 496 os->os_sync = ZFS_SYNC_STANDARD; 497 os->os_primary_cache = ZFS_CACHE_ALL; 498 os->os_secondary_cache = ZFS_CACHE_ALL; 499 } 500 /* 501 * These properties will be filled in by the logic in zfs_get_zplprop() 502 * when they are queried for the first time. 
503 */ 504 os->os_version = OBJSET_PROP_UNINITIALIZED; 505 os->os_normalization = OBJSET_PROP_UNINITIALIZED; 506 os->os_utf8only = OBJSET_PROP_UNINITIALIZED; 507 os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED; 508 509 if (ds == NULL || !ds->ds_is_snapshot) 510 os->os_zil_header = os->os_phys->os_zil_header; 511 os->os_zil = zil_alloc(os, &os->os_zil_header); 512 513 for (i = 0; i < TXG_SIZE; i++) { 514 os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t), 515 offsetof(dnode_t, dn_dirty_link[i]), 516 dnode_multilist_index_func); 517 } 518 list_create(&os->os_dnodes, sizeof (dnode_t), 519 offsetof(dnode_t, dn_link)); 520 list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t), 521 offsetof(dmu_buf_impl_t, db_link)); 522 523 mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL); 524 mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL); 525 mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL); 526 mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL); 527 528 dnode_special_open(os, &os->os_phys->os_meta_dnode, 529 DMU_META_DNODE_OBJECT, &os->os_meta_dnode); 530 if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) { 531 dnode_special_open(os, &os->os_phys->os_userused_dnode, 532 DMU_USERUSED_OBJECT, &os->os_userused_dnode); 533 dnode_special_open(os, &os->os_phys->os_groupused_dnode, 534 DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode); 535 } 536 537 *osp = os; 538 return (0); 539 } 540 541 int 542 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp) 543 { 544 int err = 0; 545 546 /* 547 * We shouldn't be doing anything with dsl_dataset_t's unless the 548 * pool_config lock is held, or the dataset is long-held. 549 */ 550 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) || 551 dsl_dataset_long_held(ds)); 552 553 mutex_enter(&ds->ds_opening_lock); 554 if (ds->ds_objset == NULL) { 555 objset_t *os; 556 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 557 err = dmu_objset_open_impl(dsl_dataset_get_spa(ds), 558 ds, dsl_dataset_get_blkptr(ds), &os); 559 rrw_exit(&ds->ds_bp_rwlock, FTAG); 560 561 if (err == 0) { 562 mutex_enter(&ds->ds_lock); 563 ASSERT(ds->ds_objset == NULL); 564 ds->ds_objset = os; 565 mutex_exit(&ds->ds_lock); 566 } 567 } 568 *osp = ds->ds_objset; 569 mutex_exit(&ds->ds_opening_lock); 570 return (err); 571 } 572 573 /* 574 * Holds the pool while the objset is held. Therefore only one objset 575 * can be held at a time. 576 */ 577 int 578 dmu_objset_hold(const char *name, void *tag, objset_t **osp) 579 { 580 dsl_pool_t *dp; 581 dsl_dataset_t *ds; 582 int err; 583 584 err = dsl_pool_hold(name, tag, &dp); 585 if (err != 0) 586 return (err); 587 err = dsl_dataset_hold(dp, name, tag, &ds); 588 if (err != 0) { 589 dsl_pool_rele(dp, tag); 590 return (err); 591 } 592 593 err = dmu_objset_from_ds(ds, osp); 594 if (err != 0) { 595 dsl_dataset_rele(ds, tag); 596 dsl_pool_rele(dp, tag); 597 } 598 599 return (err); 600 } 601 602 static int 603 dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type, 604 boolean_t readonly, void *tag, objset_t **osp) 605 { 606 int err; 607 608 err = dmu_objset_from_ds(ds, osp); 609 if (err != 0) { 610 dsl_dataset_disown(ds, tag); 611 } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) { 612 dsl_dataset_disown(ds, tag); 613 return (SET_ERROR(EINVAL)); 614 } else if (!readonly && dsl_dataset_is_snapshot(ds)) { 615 dsl_dataset_disown(ds, tag); 616 return (SET_ERROR(EROFS)); 617 } 618 return (err); 619 } 620 621 /* 622 * dsl_pool must not be held when this is called. 
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}
	err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
	dsl_pool_rele(dp, FTAG);

	return (err);
}

int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own_obj(dp, obj, tag, &ds);
	if (err != 0)
		return (err);

	return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, it is held and long held by 'tag' and
 * ds_owner == tag.  We will then release and reacquire ownership of the
 * dataset while holding the pool config_rwlock so that no intervening
 * namespace or ownership changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
    void *tag)
{
	dsl_pool_t *dp;
	char name[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = ds->ds_dir->dd_pool;
	dsl_pool_config_enter(dp, FTAG);
	dsl_dataset_disown(ds, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, newds));
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t dn_marker;
	dnode_t *dn;

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, &dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, &dn_marker);
			list_remove(&os->os_dnodes, &dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds)
		dsl_prop_unregister_all(ds, os);

	if (os->os_sa)
		sa_tear_down(os);

	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}

void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use. We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_userused_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		multilist_destroy(os->os_dirty_dnodes[i]);
	}
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
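 *
 * (For the head-dataset case handled below, dn_nlevels is therefore
 * sized up front: each additional indirect level multiplies the number
 * of addressable dnodes by 2^(indblkshift - SPA_BLKPTRSHIFT), so a
 * small, fixed number of levels already covers DN_MAX_OBJECT.)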
852 */ 853 if (ds != NULL) { 854 int levels = 1; 855 856 /* 857 * Determine the number of levels necessary for the meta-dnode 858 * to contain DN_MAX_OBJECT dnodes. Note that in order to 859 * ensure that we do not overflow 64 bits, there has to be 860 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT 861 * but < 2^64. Therefore, 862 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be 863 * less than (64 - log2(DN_MAX_OBJECT)) (16). 864 */ 865 while ((uint64_t)mdn->dn_nblkptr << 866 (mdn->dn_datablkshift - DNODE_SHIFT + 867 (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) < 868 DN_MAX_OBJECT) 869 levels++; 870 871 mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] = 872 mdn->dn_nlevels = levels; 873 } 874 875 ASSERT(type != DMU_OST_NONE); 876 ASSERT(type != DMU_OST_ANY); 877 ASSERT(type < DMU_OST_NUMTYPES); 878 os->os_phys->os_type = type; 879 if (dmu_objset_userused_enabled(os)) { 880 os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; 881 os->os_flags = os->os_phys->os_flags; 882 } 883 884 dsl_dataset_dirty(ds, tx); 885 886 return (os); 887 } 888 889 typedef struct dmu_objset_create_arg { 890 const char *doca_name; 891 cred_t *doca_cred; 892 void (*doca_userfunc)(objset_t *os, void *arg, 893 cred_t *cr, dmu_tx_t *tx); 894 void *doca_userarg; 895 dmu_objset_type_t doca_type; 896 uint64_t doca_flags; 897 } dmu_objset_create_arg_t; 898 899 /*ARGSUSED*/ 900 static int 901 dmu_objset_create_check(void *arg, dmu_tx_t *tx) 902 { 903 dmu_objset_create_arg_t *doca = arg; 904 dsl_pool_t *dp = dmu_tx_pool(tx); 905 dsl_dir_t *pdd; 906 const char *tail; 907 int error; 908 909 if (strchr(doca->doca_name, '@') != NULL) 910 return (SET_ERROR(EINVAL)); 911 912 if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN) 913 return (SET_ERROR(ENAMETOOLONG)); 914 915 if (dataset_nestcheck(doca->doca_name) != 0) 916 return (SET_ERROR(ENAMETOOLONG)); 917 918 error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail); 919 if (error != 0) 920 return (error); 921 if (tail == NULL) { 922 dsl_dir_rele(pdd, FTAG); 923 return (SET_ERROR(EEXIST)); 924 } 925 error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, 926 doca->doca_cred); 927 dsl_dir_rele(pdd, FTAG); 928 929 return (error); 930 } 931 932 static void 933 dmu_objset_create_sync(void *arg, dmu_tx_t *tx) 934 { 935 dmu_objset_create_arg_t *doca = arg; 936 dsl_pool_t *dp = dmu_tx_pool(tx); 937 dsl_dir_t *pdd; 938 const char *tail; 939 dsl_dataset_t *ds; 940 uint64_t obj; 941 blkptr_t *bp; 942 objset_t *os; 943 944 VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail)); 945 946 obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags, 947 doca->doca_cred, tx); 948 949 VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds)); 950 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 951 bp = dsl_dataset_get_blkptr(ds); 952 os = dmu_objset_create_impl(pdd->dd_pool->dp_spa, 953 ds, bp, doca->doca_type, tx); 954 rrw_exit(&ds->ds_bp_rwlock, FTAG); 955 956 if (doca->doca_userfunc != NULL) { 957 doca->doca_userfunc(os, doca->doca_userarg, 958 doca->doca_cred, tx); 959 } 960 961 spa_history_log_internal_ds(ds, "create", tx, ""); 962 dsl_dataset_rele(ds, FTAG); 963 dsl_dir_rele(pdd, FTAG); 964 } 965 966 int 967 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, 968 void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg) 969 { 970 dmu_objset_create_arg_t doca; 971 972 doca.doca_name = name; 973 doca.doca_cred = CRED(); 974 doca.doca_flags = flags; 975 doca.doca_userfunc = func; 
976 doca.doca_userarg = arg; 977 doca.doca_type = type; 978 979 return (dsl_sync_task(name, 980 dmu_objset_create_check, dmu_objset_create_sync, &doca, 981 5, ZFS_SPACE_CHECK_NORMAL)); 982 } 983 984 typedef struct dmu_objset_clone_arg { 985 const char *doca_clone; 986 const char *doca_origin; 987 cred_t *doca_cred; 988 } dmu_objset_clone_arg_t; 989 990 /*ARGSUSED*/ 991 static int 992 dmu_objset_clone_check(void *arg, dmu_tx_t *tx) 993 { 994 dmu_objset_clone_arg_t *doca = arg; 995 dsl_dir_t *pdd; 996 const char *tail; 997 int error; 998 dsl_dataset_t *origin; 999 dsl_pool_t *dp = dmu_tx_pool(tx); 1000 1001 if (strchr(doca->doca_clone, '@') != NULL) 1002 return (SET_ERROR(EINVAL)); 1003 1004 if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN) 1005 return (SET_ERROR(ENAMETOOLONG)); 1006 1007 error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail); 1008 if (error != 0) 1009 return (error); 1010 if (tail == NULL) { 1011 dsl_dir_rele(pdd, FTAG); 1012 return (SET_ERROR(EEXIST)); 1013 } 1014 1015 error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, 1016 doca->doca_cred); 1017 if (error != 0) { 1018 dsl_dir_rele(pdd, FTAG); 1019 return (SET_ERROR(EDQUOT)); 1020 } 1021 dsl_dir_rele(pdd, FTAG); 1022 1023 error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin); 1024 if (error != 0) 1025 return (error); 1026 1027 /* You can only clone snapshots, not the head datasets. */ 1028 if (!origin->ds_is_snapshot) { 1029 dsl_dataset_rele(origin, FTAG); 1030 return (SET_ERROR(EINVAL)); 1031 } 1032 dsl_dataset_rele(origin, FTAG); 1033 1034 return (0); 1035 } 1036 1037 static void 1038 dmu_objset_clone_sync(void *arg, dmu_tx_t *tx) 1039 { 1040 dmu_objset_clone_arg_t *doca = arg; 1041 dsl_pool_t *dp = dmu_tx_pool(tx); 1042 dsl_dir_t *pdd; 1043 const char *tail; 1044 dsl_dataset_t *origin, *ds; 1045 uint64_t obj; 1046 char namebuf[ZFS_MAX_DATASET_NAME_LEN]; 1047 1048 VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail)); 1049 VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin)); 1050 1051 obj = dsl_dataset_create_sync(pdd, tail, origin, 0, 1052 doca->doca_cred, tx); 1053 1054 VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds)); 1055 dsl_dataset_name(origin, namebuf); 1056 spa_history_log_internal_ds(ds, "clone", tx, 1057 "origin=%s (%llu)", namebuf, origin->ds_object); 1058 dsl_dataset_rele(ds, FTAG); 1059 dsl_dataset_rele(origin, FTAG); 1060 dsl_dir_rele(pdd, FTAG); 1061 } 1062 1063 int 1064 dmu_objset_clone(const char *clone, const char *origin) 1065 { 1066 dmu_objset_clone_arg_t doca; 1067 1068 doca.doca_clone = clone; 1069 doca.doca_origin = origin; 1070 doca.doca_cred = CRED(); 1071 1072 return (dsl_sync_task(clone, 1073 dmu_objset_clone_check, dmu_objset_clone_sync, &doca, 1074 5, ZFS_SPACE_CHECK_NORMAL)); 1075 } 1076 1077 static int 1078 dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg) 1079 { 1080 int error = 0; 1081 uint64_t object = 0; 1082 while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { 1083 error = dmu_object_remap_indirects(os, object, 1084 last_removed_txg); 1085 /* 1086 * If the ZPL removed the object before we managed to dnode_hold 1087 * it, we would get an ENOENT. If the ZPL declares its intent 1088 * to remove the object (dnode_free) before we manage to 1089 * dnode_hold it, we would get an EEXIST. In either case, we 1090 * want to continue remapping the other objects in the objset; 1091 * in all other cases, we want to break early. 
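 *
 * (dmu_object_next() returns ESRCH once there are no more objects to
 * visit; that case is treated as normal completion below.)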
1092 */ 1093 if (error != 0 && error != ENOENT && error != EEXIST) { 1094 break; 1095 } 1096 } 1097 if (error == ESRCH) { 1098 error = 0; 1099 } 1100 return (error); 1101 } 1102 1103 int 1104 dmu_objset_remap_indirects(const char *fsname) 1105 { 1106 int error = 0; 1107 objset_t *os = NULL; 1108 uint64_t last_removed_txg; 1109 uint64_t remap_start_txg; 1110 dsl_dir_t *dd; 1111 1112 error = dmu_objset_hold(fsname, FTAG, &os); 1113 if (error != 0) { 1114 return (error); 1115 } 1116 dd = dmu_objset_ds(os)->ds_dir; 1117 1118 if (!spa_feature_is_enabled(dmu_objset_spa(os), 1119 SPA_FEATURE_OBSOLETE_COUNTS)) { 1120 dmu_objset_rele(os, FTAG); 1121 return (SET_ERROR(ENOTSUP)); 1122 } 1123 1124 if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) { 1125 dmu_objset_rele(os, FTAG); 1126 return (SET_ERROR(EINVAL)); 1127 } 1128 1129 /* 1130 * If there has not been a removal, we're done. 1131 */ 1132 last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os)); 1133 if (last_removed_txg == -1ULL) { 1134 dmu_objset_rele(os, FTAG); 1135 return (0); 1136 } 1137 1138 /* 1139 * If we have remapped since the last removal, we're done. 1140 */ 1141 if (dsl_dir_is_zapified(dd)) { 1142 uint64_t last_remap_txg; 1143 if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)), 1144 dd->dd_object, DD_FIELD_LAST_REMAP_TXG, 1145 sizeof (last_remap_txg), 1, &last_remap_txg) == 0 && 1146 last_remap_txg > last_removed_txg) { 1147 dmu_objset_rele(os, FTAG); 1148 return (0); 1149 } 1150 } 1151 1152 dsl_dataset_long_hold(dmu_objset_ds(os), FTAG); 1153 dsl_pool_rele(dmu_objset_pool(os), FTAG); 1154 1155 remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os)); 1156 error = dmu_objset_remap_indirects_impl(os, last_removed_txg); 1157 if (error == 0) { 1158 /* 1159 * We update the last_remap_txg to be the start txg so that 1160 * we can guarantee that every block older than last_remap_txg 1161 * that can be remapped has been remapped. 1162 */ 1163 error = dsl_dir_update_last_remap_txg(dd, remap_start_txg); 1164 } 1165 1166 dsl_dataset_long_rele(dmu_objset_ds(os), FTAG); 1167 dsl_dataset_rele(dmu_objset_ds(os), FTAG); 1168 1169 return (error); 1170 } 1171 1172 int 1173 dmu_objset_snapshot_one(const char *fsname, const char *snapname) 1174 { 1175 int err; 1176 char *longsnap = kmem_asprintf("%s@%s", fsname, snapname); 1177 nvlist_t *snaps = fnvlist_alloc(); 1178 1179 fnvlist_add_boolean(snaps, longsnap); 1180 strfree(longsnap); 1181 err = dsl_dataset_snapshot(snaps, NULL, NULL); 1182 fnvlist_free(snaps); 1183 return (err); 1184 } 1185 1186 static void 1187 dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx) 1188 { 1189 dnode_t *dn; 1190 1191 while ((dn = multilist_sublist_head(list)) != NULL) { 1192 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 1193 ASSERT(dn->dn_dbuf->db_data_pending); 1194 /* 1195 * Initialize dn_zio outside dnode_sync() because the 1196 * meta-dnode needs to set it ouside dnode_sync(). 
1197 */ 1198 dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio; 1199 ASSERT(dn->dn_zio); 1200 1201 ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS); 1202 multilist_sublist_remove(list, dn); 1203 1204 multilist_t *newlist = dn->dn_objset->os_synced_dnodes; 1205 if (newlist != NULL) { 1206 (void) dnode_add_ref(dn, newlist); 1207 multilist_insert(newlist, dn); 1208 } 1209 1210 dnode_sync(dn, tx); 1211 } 1212 } 1213 1214 /* ARGSUSED */ 1215 static void 1216 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) 1217 { 1218 blkptr_t *bp = zio->io_bp; 1219 objset_t *os = arg; 1220 dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; 1221 1222 ASSERT(!BP_IS_EMBEDDED(bp)); 1223 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET); 1224 ASSERT0(BP_GET_LEVEL(bp)); 1225 1226 /* 1227 * Update rootbp fill count: it should be the number of objects 1228 * allocated in the object set (not counting the "special" 1229 * objects that are stored in the objset_phys_t -- the meta 1230 * dnode and user/group accounting objects). 1231 */ 1232 bp->blk_fill = 0; 1233 for (int i = 0; i < dnp->dn_nblkptr; i++) 1234 bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]); 1235 if (os->os_dsl_dataset != NULL) 1236 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG); 1237 *os->os_rootbp = *bp; 1238 if (os->os_dsl_dataset != NULL) 1239 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1240 } 1241 1242 /* ARGSUSED */ 1243 static void 1244 dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg) 1245 { 1246 blkptr_t *bp = zio->io_bp; 1247 blkptr_t *bp_orig = &zio->io_bp_orig; 1248 objset_t *os = arg; 1249 1250 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 1251 ASSERT(BP_EQUAL(bp, bp_orig)); 1252 } else { 1253 dsl_dataset_t *ds = os->os_dsl_dataset; 1254 dmu_tx_t *tx = os->os_synctx; 1255 1256 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 1257 dsl_dataset_block_born(ds, bp, tx); 1258 } 1259 kmem_free(bp, sizeof (*bp)); 1260 } 1261 1262 typedef struct sync_dnodes_arg { 1263 multilist_t *sda_list; 1264 int sda_sublist_idx; 1265 multilist_t *sda_newlist; 1266 dmu_tx_t *sda_tx; 1267 } sync_dnodes_arg_t; 1268 1269 static void 1270 sync_dnodes_task(void *arg) 1271 { 1272 sync_dnodes_arg_t *sda = arg; 1273 1274 multilist_sublist_t *ms = 1275 multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx); 1276 1277 dmu_objset_sync_dnodes(ms, sda->sda_tx); 1278 1279 multilist_sublist_unlock(ms); 1280 1281 kmem_free(sda, sizeof (*sda)); 1282 } 1283 1284 1285 /* called from dsl */ 1286 void 1287 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx) 1288 { 1289 int txgoff; 1290 zbookmark_phys_t zb; 1291 zio_prop_t zp; 1292 zio_t *zio; 1293 list_t *list; 1294 dbuf_dirty_record_t *dr; 1295 blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP); 1296 *blkptr_copy = *os->os_rootbp; 1297 1298 dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg); 1299 1300 ASSERT(dmu_tx_is_syncing(tx)); 1301 /* XXX the write_done callback should really give us the tx... */ 1302 os->os_synctx = tx; 1303 1304 if (os->os_dsl_dataset == NULL) { 1305 /* 1306 * This is the MOS. If we have upgraded, 1307 * spa_max_replication() could change, so reset 1308 * os_copies here. 1309 */ 1310 os->os_copies = spa_max_replication(os->os_spa); 1311 } 1312 1313 /* 1314 * Create the root block IO 1315 */ 1316 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 
1317 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 1318 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 1319 arc_release(os->os_phys_buf, &os->os_phys_buf); 1320 1321 dmu_write_policy(os, NULL, 0, 0, &zp); 1322 1323 zio = arc_write(pio, os->os_spa, tx->tx_txg, 1324 blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), 1325 &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done, 1326 os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 1327 1328 /* 1329 * Sync special dnodes - the parent IO for the sync is the root block 1330 */ 1331 DMU_META_DNODE(os)->dn_zio = zio; 1332 dnode_sync(DMU_META_DNODE(os), tx); 1333 1334 os->os_phys->os_flags = os->os_flags; 1335 1336 if (DMU_USERUSED_DNODE(os) && 1337 DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) { 1338 DMU_USERUSED_DNODE(os)->dn_zio = zio; 1339 dnode_sync(DMU_USERUSED_DNODE(os), tx); 1340 DMU_GROUPUSED_DNODE(os)->dn_zio = zio; 1341 dnode_sync(DMU_GROUPUSED_DNODE(os), tx); 1342 } 1343 1344 txgoff = tx->tx_txg & TXG_MASK; 1345 1346 if (dmu_objset_userused_enabled(os)) { 1347 /* 1348 * We must create the list here because it uses the 1349 * dn_dirty_link[] of this txg. But it may already 1350 * exist because we call dsl_dataset_sync() twice per txg. 1351 */ 1352 if (os->os_synced_dnodes == NULL) { 1353 os->os_synced_dnodes = 1354 multilist_create(sizeof (dnode_t), 1355 offsetof(dnode_t, dn_dirty_link[txgoff]), 1356 dnode_multilist_index_func); 1357 } else { 1358 ASSERT3U(os->os_synced_dnodes->ml_offset, ==, 1359 offsetof(dnode_t, dn_dirty_link[txgoff])); 1360 } 1361 } 1362 1363 for (int i = 0; 1364 i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) { 1365 sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP); 1366 sda->sda_list = os->os_dirty_dnodes[txgoff]; 1367 sda->sda_sublist_idx = i; 1368 sda->sda_tx = tx; 1369 (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq, 1370 sync_dnodes_task, sda, 0); 1371 /* callback frees sda */ 1372 } 1373 taskq_wait(dmu_objset_pool(os)->dp_sync_taskq); 1374 1375 list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff]; 1376 while ((dr = list_head(list)) != NULL) { 1377 ASSERT0(dr->dr_dbuf->db_level); 1378 list_remove(list, dr); 1379 if (dr->dr_zio) 1380 zio_nowait(dr->dr_zio); 1381 } 1382 1383 /* Enable dnode backfill if enough objects have been freed. */ 1384 if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) { 1385 os->os_rescan_dnodes = B_TRUE; 1386 os->os_freed_dnodes = 0; 1387 } 1388 1389 /* 1390 * Free intent log blocks up to this tx. 
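 * The in-memory ZIL header is then copied into os_phys below so that
 * it is written out with this txg's root block.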
1391 */ 1392 zil_sync(os->os_zil, tx); 1393 os->os_phys->os_zil_header = os->os_zil_header; 1394 zio_nowait(zio); 1395 } 1396 1397 boolean_t 1398 dmu_objset_is_dirty(objset_t *os, uint64_t txg) 1399 { 1400 return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK])); 1401 } 1402 1403 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES]; 1404 1405 void 1406 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb) 1407 { 1408 used_cbs[ost] = cb; 1409 } 1410 1411 boolean_t 1412 dmu_objset_userused_enabled(objset_t *os) 1413 { 1414 return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE && 1415 used_cbs[os->os_phys->os_type] != NULL && 1416 DMU_USERUSED_DNODE(os) != NULL); 1417 } 1418 1419 typedef struct userquota_node { 1420 uint64_t uqn_id; 1421 int64_t uqn_delta; 1422 avl_node_t uqn_node; 1423 } userquota_node_t; 1424 1425 typedef struct userquota_cache { 1426 avl_tree_t uqc_user_deltas; 1427 avl_tree_t uqc_group_deltas; 1428 } userquota_cache_t; 1429 1430 static int 1431 userquota_compare(const void *l, const void *r) 1432 { 1433 const userquota_node_t *luqn = l; 1434 const userquota_node_t *ruqn = r; 1435 1436 if (luqn->uqn_id < ruqn->uqn_id) 1437 return (-1); 1438 if (luqn->uqn_id > ruqn->uqn_id) 1439 return (1); 1440 return (0); 1441 } 1442 1443 static void 1444 do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx) 1445 { 1446 void *cookie; 1447 userquota_node_t *uqn; 1448 1449 ASSERT(dmu_tx_is_syncing(tx)); 1450 1451 cookie = NULL; 1452 while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas, 1453 &cookie)) != NULL) { 1454 /* 1455 * os_userused_lock protects against concurrent calls to 1456 * zap_increment_int(). It's needed because zap_increment_int() 1457 * is not thread-safe (i.e. not atomic). 1458 */ 1459 mutex_enter(&os->os_userused_lock); 1460 VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT, 1461 uqn->uqn_id, uqn->uqn_delta, tx)); 1462 mutex_exit(&os->os_userused_lock); 1463 kmem_free(uqn, sizeof (*uqn)); 1464 } 1465 avl_destroy(&cache->uqc_user_deltas); 1466 1467 cookie = NULL; 1468 while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas, 1469 &cookie)) != NULL) { 1470 mutex_enter(&os->os_userused_lock); 1471 VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT, 1472 uqn->uqn_id, uqn->uqn_delta, tx)); 1473 mutex_exit(&os->os_userused_lock); 1474 kmem_free(uqn, sizeof (*uqn)); 1475 } 1476 avl_destroy(&cache->uqc_group_deltas); 1477 } 1478 1479 static void 1480 userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta) 1481 { 1482 userquota_node_t search = { .uqn_id = id }; 1483 avl_index_t idx; 1484 1485 userquota_node_t *uqn = avl_find(avl, &search, &idx); 1486 if (uqn == NULL) { 1487 uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP); 1488 uqn->uqn_id = id; 1489 avl_insert(avl, uqn, idx); 1490 } 1491 uqn->uqn_delta += delta; 1492 } 1493 1494 static void 1495 do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags, 1496 uint64_t user, uint64_t group, boolean_t subtract) 1497 { 1498 if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) { 1499 int64_t delta = DNODE_SIZE + used; 1500 if (subtract) 1501 delta = -delta; 1502 1503 userquota_update_cache(&cache->uqc_user_deltas, user, delta); 1504 userquota_update_cache(&cache->uqc_group_deltas, group, delta); 1505 } 1506 } 1507 1508 typedef struct userquota_updates_arg { 1509 objset_t *uua_os; 1510 int uua_sublist_idx; 1511 dmu_tx_t *uua_tx; 1512 } userquota_updates_arg_t; 1513 1514 static void 1515 userquota_updates_task(void *arg) 1516 { 1517 userquota_updates_arg_t *uua = arg; 
1518 objset_t *os = uua->uua_os; 1519 dmu_tx_t *tx = uua->uua_tx; 1520 dnode_t *dn; 1521 userquota_cache_t cache = { 0 }; 1522 1523 multilist_sublist_t *list = 1524 multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx); 1525 1526 ASSERT(multilist_sublist_head(list) == NULL || 1527 dmu_objset_userused_enabled(os)); 1528 avl_create(&cache.uqc_user_deltas, userquota_compare, 1529 sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); 1530 avl_create(&cache.uqc_group_deltas, userquota_compare, 1531 sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); 1532 1533 while ((dn = multilist_sublist_head(list)) != NULL) { 1534 int flags; 1535 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object)); 1536 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE || 1537 dn->dn_phys->dn_flags & 1538 DNODE_FLAG_USERUSED_ACCOUNTED); 1539 1540 flags = dn->dn_id_flags; 1541 ASSERT(flags); 1542 if (flags & DN_ID_OLD_EXIST) { 1543 do_userquota_update(&cache, 1544 dn->dn_oldused, dn->dn_oldflags, 1545 dn->dn_olduid, dn->dn_oldgid, B_TRUE); 1546 } 1547 if (flags & DN_ID_NEW_EXIST) { 1548 do_userquota_update(&cache, 1549 DN_USED_BYTES(dn->dn_phys), 1550 dn->dn_phys->dn_flags, dn->dn_newuid, 1551 dn->dn_newgid, B_FALSE); 1552 } 1553 1554 mutex_enter(&dn->dn_mtx); 1555 dn->dn_oldused = 0; 1556 dn->dn_oldflags = 0; 1557 if (dn->dn_id_flags & DN_ID_NEW_EXIST) { 1558 dn->dn_olduid = dn->dn_newuid; 1559 dn->dn_oldgid = dn->dn_newgid; 1560 dn->dn_id_flags |= DN_ID_OLD_EXIST; 1561 if (dn->dn_bonuslen == 0) 1562 dn->dn_id_flags |= DN_ID_CHKED_SPILL; 1563 else 1564 dn->dn_id_flags |= DN_ID_CHKED_BONUS; 1565 } 1566 dn->dn_id_flags &= ~(DN_ID_NEW_EXIST); 1567 mutex_exit(&dn->dn_mtx); 1568 1569 multilist_sublist_remove(list, dn); 1570 dnode_rele(dn, os->os_synced_dnodes); 1571 } 1572 do_userquota_cacheflush(os, &cache, tx); 1573 multilist_sublist_unlock(list); 1574 kmem_free(uua, sizeof (*uua)); 1575 } 1576 1577 void 1578 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx) 1579 { 1580 if (!dmu_objset_userused_enabled(os)) 1581 return; 1582 1583 /* Allocate the user/groupused objects if necessary. */ 1584 if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) { 1585 VERIFY0(zap_create_claim(os, 1586 DMU_USERUSED_OBJECT, 1587 DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); 1588 VERIFY0(zap_create_claim(os, 1589 DMU_GROUPUSED_OBJECT, 1590 DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); 1591 } 1592 1593 for (int i = 0; 1594 i < multilist_get_num_sublists(os->os_synced_dnodes); i++) { 1595 userquota_updates_arg_t *uua = 1596 kmem_alloc(sizeof (*uua), KM_SLEEP); 1597 uua->uua_os = os; 1598 uua->uua_sublist_idx = i; 1599 uua->uua_tx = tx; 1600 /* note: caller does taskq_wait() */ 1601 (void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq, 1602 userquota_updates_task, uua, 0); 1603 /* callback frees uua */ 1604 } 1605 } 1606 1607 /* 1608 * Returns a pointer to data to find uid/gid from 1609 * 1610 * If a dirty record for transaction group that is syncing can't 1611 * be found then NULL is returned. In the NULL case it is assumed 1612 * the uid/gid aren't changing. 
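 *
 * For a spill block the dirty data is an arc_buf_t, so its b_data is
 * returned; otherwise the dirty record's dr_data is returned directly.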
1613 */ 1614 static void * 1615 dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx) 1616 { 1617 dbuf_dirty_record_t *dr, **drp; 1618 void *data; 1619 1620 if (db->db_dirtycnt == 0) 1621 return (db->db.db_data); /* Nothing is changing */ 1622 1623 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1624 if (dr->dr_txg == tx->tx_txg) 1625 break; 1626 1627 if (dr == NULL) { 1628 data = NULL; 1629 } else { 1630 dnode_t *dn; 1631 1632 DB_DNODE_ENTER(dr->dr_dbuf); 1633 dn = DB_DNODE(dr->dr_dbuf); 1634 1635 if (dn->dn_bonuslen == 0 && 1636 dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID) 1637 data = dr->dt.dl.dr_data->b_data; 1638 else 1639 data = dr->dt.dl.dr_data; 1640 1641 DB_DNODE_EXIT(dr->dr_dbuf); 1642 } 1643 1644 return (data); 1645 } 1646 1647 void 1648 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx) 1649 { 1650 objset_t *os = dn->dn_objset; 1651 void *data = NULL; 1652 dmu_buf_impl_t *db = NULL; 1653 uint64_t *user = NULL; 1654 uint64_t *group = NULL; 1655 int flags = dn->dn_id_flags; 1656 int error; 1657 boolean_t have_spill = B_FALSE; 1658 1659 if (!dmu_objset_userused_enabled(dn->dn_objset)) 1660 return; 1661 1662 if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST| 1663 DN_ID_CHKED_SPILL))) 1664 return; 1665 1666 if (before && dn->dn_bonuslen != 0) 1667 data = DN_BONUS(dn->dn_phys); 1668 else if (!before && dn->dn_bonuslen != 0) { 1669 if (dn->dn_bonus) { 1670 db = dn->dn_bonus; 1671 mutex_enter(&db->db_mtx); 1672 data = dmu_objset_userquota_find_data(db, tx); 1673 } else { 1674 data = DN_BONUS(dn->dn_phys); 1675 } 1676 } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) { 1677 int rf = 0; 1678 1679 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) 1680 rf |= DB_RF_HAVESTRUCT; 1681 error = dmu_spill_hold_by_dnode(dn, 1682 rf | DB_RF_MUST_SUCCEED, 1683 FTAG, (dmu_buf_t **)&db); 1684 ASSERT(error == 0); 1685 mutex_enter(&db->db_mtx); 1686 data = (before) ? db->db.db_data : 1687 dmu_objset_userquota_find_data(db, tx); 1688 have_spill = B_TRUE; 1689 } else { 1690 mutex_enter(&dn->dn_mtx); 1691 dn->dn_id_flags |= DN_ID_CHKED_BONUS; 1692 mutex_exit(&dn->dn_mtx); 1693 return; 1694 } 1695 1696 if (before) { 1697 ASSERT(data); 1698 user = &dn->dn_olduid; 1699 group = &dn->dn_oldgid; 1700 } else if (data) { 1701 user = &dn->dn_newuid; 1702 group = &dn->dn_newgid; 1703 } 1704 1705 /* 1706 * Must always call the callback in case the object 1707 * type has changed and that type isn't an object type to track 1708 */ 1709 error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data, 1710 user, group); 1711 1712 /* 1713 * Preserve existing uid/gid when the callback can't determine 1714 * what the new uid/gid are and the callback returned EEXIST. 1715 * The EEXIST error tells us to just use the existing uid/gid. 1716 * If we don't know what the old values are then just assign 1717 * them to 0, since that is a new file being created. 
1718 */ 1719 if (!before && data == NULL && error == EEXIST) { 1720 if (flags & DN_ID_OLD_EXIST) { 1721 dn->dn_newuid = dn->dn_olduid; 1722 dn->dn_newgid = dn->dn_oldgid; 1723 } else { 1724 dn->dn_newuid = 0; 1725 dn->dn_newgid = 0; 1726 } 1727 error = 0; 1728 } 1729 1730 if (db) 1731 mutex_exit(&db->db_mtx); 1732 1733 mutex_enter(&dn->dn_mtx); 1734 if (error == 0 && before) 1735 dn->dn_id_flags |= DN_ID_OLD_EXIST; 1736 if (error == 0 && !before) 1737 dn->dn_id_flags |= DN_ID_NEW_EXIST; 1738 1739 if (have_spill) { 1740 dn->dn_id_flags |= DN_ID_CHKED_SPILL; 1741 } else { 1742 dn->dn_id_flags |= DN_ID_CHKED_BONUS; 1743 } 1744 mutex_exit(&dn->dn_mtx); 1745 if (have_spill) 1746 dmu_buf_rele((dmu_buf_t *)db, FTAG); 1747 } 1748 1749 boolean_t 1750 dmu_objset_userspace_present(objset_t *os) 1751 { 1752 return (os->os_phys->os_flags & 1753 OBJSET_FLAG_USERACCOUNTING_COMPLETE); 1754 } 1755 1756 int 1757 dmu_objset_userspace_upgrade(objset_t *os) 1758 { 1759 uint64_t obj; 1760 int err = 0; 1761 1762 if (dmu_objset_userspace_present(os)) 1763 return (0); 1764 if (!dmu_objset_userused_enabled(os)) 1765 return (SET_ERROR(ENOTSUP)); 1766 if (dmu_objset_is_snapshot(os)) 1767 return (SET_ERROR(EINVAL)); 1768 1769 /* 1770 * We simply need to mark every object dirty, so that it will be 1771 * synced out and now accounted. If this is called 1772 * concurrently, or if we already did some work before crashing, 1773 * that's fine, since we track each object's accounted state 1774 * independently. 1775 */ 1776 1777 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) { 1778 dmu_tx_t *tx; 1779 dmu_buf_t *db; 1780 int objerr; 1781 1782 if (issig(JUSTLOOKING) && issig(FORREAL)) 1783 return (SET_ERROR(EINTR)); 1784 1785 objerr = dmu_bonus_hold(os, obj, FTAG, &db); 1786 if (objerr != 0) 1787 continue; 1788 tx = dmu_tx_create(os); 1789 dmu_tx_hold_bonus(tx, obj); 1790 objerr = dmu_tx_assign(tx, TXG_WAIT); 1791 if (objerr != 0) { 1792 dmu_tx_abort(tx); 1793 continue; 1794 } 1795 dmu_buf_will_dirty(db, tx); 1796 dmu_buf_rele(db, FTAG); 1797 dmu_tx_commit(tx); 1798 } 1799 1800 os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; 1801 txg_wait_synced(dmu_objset_pool(os), 0); 1802 return (0); 1803 } 1804 1805 void 1806 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, 1807 uint64_t *usedobjsp, uint64_t *availobjsp) 1808 { 1809 dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp, 1810 usedobjsp, availobjsp); 1811 } 1812 1813 uint64_t 1814 dmu_objset_fsid_guid(objset_t *os) 1815 { 1816 return (dsl_dataset_fsid_guid(os->os_dsl_dataset)); 1817 } 1818 1819 void 1820 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat) 1821 { 1822 stat->dds_type = os->os_phys->os_type; 1823 if (os->os_dsl_dataset) 1824 dsl_dataset_fast_stat(os->os_dsl_dataset, stat); 1825 } 1826 1827 void 1828 dmu_objset_stats(objset_t *os, nvlist_t *nv) 1829 { 1830 ASSERT(os->os_dsl_dataset || 1831 os->os_phys->os_type == DMU_OST_META); 1832 1833 if (os->os_dsl_dataset != NULL) 1834 dsl_dataset_stats(os->os_dsl_dataset, nv); 1835 1836 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE, 1837 os->os_phys->os_type); 1838 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING, 1839 dmu_objset_userspace_present(os)); 1840 } 1841 1842 int 1843 dmu_objset_is_snapshot(objset_t *os) 1844 { 1845 if (os->os_dsl_dataset != NULL) 1846 return (os->os_dsl_dataset->ds_is_snapshot); 1847 else 1848 return (B_FALSE); 1849 } 1850 1851 int 1852 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen, 1853 boolean_t 
*conflict) 1854 { 1855 dsl_dataset_t *ds = os->os_dsl_dataset; 1856 uint64_t ignored; 1857 1858 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0) 1859 return (SET_ERROR(ENOENT)); 1860 1861 return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset, 1862 dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored, 1863 MT_NORMALIZE, real, maxlen, conflict)); 1864 } 1865 1866 int 1867 dmu_snapshot_list_next(objset_t *os, int namelen, char *name, 1868 uint64_t *idp, uint64_t *offp, boolean_t *case_conflict) 1869 { 1870 dsl_dataset_t *ds = os->os_dsl_dataset; 1871 zap_cursor_t cursor; 1872 zap_attribute_t attr; 1873 1874 ASSERT(dsl_pool_config_held(dmu_objset_pool(os))); 1875 1876 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0) 1877 return (SET_ERROR(ENOENT)); 1878 1879 zap_cursor_init_serialized(&cursor, 1880 ds->ds_dir->dd_pool->dp_meta_objset, 1881 dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp); 1882 1883 if (zap_cursor_retrieve(&cursor, &attr) != 0) { 1884 zap_cursor_fini(&cursor); 1885 return (SET_ERROR(ENOENT)); 1886 } 1887 1888 if (strlen(attr.za_name) + 1 > namelen) { 1889 zap_cursor_fini(&cursor); 1890 return (SET_ERROR(ENAMETOOLONG)); 1891 } 1892 1893 (void) strcpy(name, attr.za_name); 1894 if (idp) 1895 *idp = attr.za_first_integer; 1896 if (case_conflict) 1897 *case_conflict = attr.za_normalization_conflict; 1898 zap_cursor_advance(&cursor); 1899 *offp = zap_cursor_serialize(&cursor); 1900 zap_cursor_fini(&cursor); 1901 1902 return (0); 1903 } 1904 1905 int 1906 dmu_dir_list_next(objset_t *os, int namelen, char *name, 1907 uint64_t *idp, uint64_t *offp) 1908 { 1909 dsl_dir_t *dd = os->os_dsl_dataset->ds_dir; 1910 zap_cursor_t cursor; 1911 zap_attribute_t attr; 1912 1913 /* there is no next dir on a snapshot! */ 1914 if (os->os_dsl_dataset->ds_object != 1915 dsl_dir_phys(dd)->dd_head_dataset_obj) 1916 return (SET_ERROR(ENOENT)); 1917 1918 zap_cursor_init_serialized(&cursor, 1919 dd->dd_pool->dp_meta_objset, 1920 dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp); 1921 1922 if (zap_cursor_retrieve(&cursor, &attr) != 0) { 1923 zap_cursor_fini(&cursor); 1924 return (SET_ERROR(ENOENT)); 1925 } 1926 1927 if (strlen(attr.za_name) + 1 > namelen) { 1928 zap_cursor_fini(&cursor); 1929 return (SET_ERROR(ENAMETOOLONG)); 1930 } 1931 1932 (void) strcpy(name, attr.za_name); 1933 if (idp) 1934 *idp = attr.za_first_integer; 1935 zap_cursor_advance(&cursor); 1936 *offp = zap_cursor_serialize(&cursor); 1937 zap_cursor_fini(&cursor); 1938 1939 return (0); 1940 } 1941 1942 typedef struct dmu_objset_find_ctx { 1943 taskq_t *dc_tq; 1944 dsl_pool_t *dc_dp; 1945 uint64_t dc_ddobj; 1946 char *dc_ddname; /* last component of ddobj's name */ 1947 int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *); 1948 void *dc_arg; 1949 int dc_flags; 1950 kmutex_t *dc_error_lock; 1951 int *dc_error; 1952 } dmu_objset_find_ctx_t; 1953 1954 static void 1955 dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp) 1956 { 1957 dsl_pool_t *dp = dcp->dc_dp; 1958 dsl_dir_t *dd; 1959 dsl_dataset_t *ds; 1960 zap_cursor_t zc; 1961 zap_attribute_t *attr; 1962 uint64_t thisobj; 1963 int err = 0; 1964 1965 /* don't process if there already was an error */ 1966 if (*dcp->dc_error != 0) 1967 goto out; 1968 1969 /* 1970 * Note: passing the name (dc_ddname) here is optional, but it 1971 * improves performance because we don't need to call 1972 * zap_value_search() to determine the name. 
1973 */ 1974 err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd); 1975 if (err != 0) 1976 goto out; 1977 1978 /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ 1979 if (dd->dd_myname[0] == '$') { 1980 dsl_dir_rele(dd, FTAG); 1981 goto out; 1982 } 1983 1984 thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj; 1985 attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); 1986 1987 /* 1988 * Iterate over all children. 1989 */ 1990 if (dcp->dc_flags & DS_FIND_CHILDREN) { 1991 for (zap_cursor_init(&zc, dp->dp_meta_objset, 1992 dsl_dir_phys(dd)->dd_child_dir_zapobj); 1993 zap_cursor_retrieve(&zc, attr) == 0; 1994 (void) zap_cursor_advance(&zc)) { 1995 ASSERT3U(attr->za_integer_length, ==, 1996 sizeof (uint64_t)); 1997 ASSERT3U(attr->za_num_integers, ==, 1); 1998 1999 dmu_objset_find_ctx_t *child_dcp = 2000 kmem_alloc(sizeof (*child_dcp), KM_SLEEP); 2001 *child_dcp = *dcp; 2002 child_dcp->dc_ddobj = attr->za_first_integer; 2003 child_dcp->dc_ddname = spa_strdup(attr->za_name); 2004 if (dcp->dc_tq != NULL) 2005 (void) taskq_dispatch(dcp->dc_tq, 2006 dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP); 2007 else 2008 dmu_objset_find_dp_impl(child_dcp); 2009 } 2010 zap_cursor_fini(&zc); 2011 } 2012 2013 /* 2014 * Iterate over all snapshots. 2015 */ 2016 if (dcp->dc_flags & DS_FIND_SNAPSHOTS) { 2017 dsl_dataset_t *ds; 2018 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); 2019 2020 if (err == 0) { 2021 uint64_t snapobj; 2022 2023 snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj; 2024 dsl_dataset_rele(ds, FTAG); 2025 2026 for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); 2027 zap_cursor_retrieve(&zc, attr) == 0; 2028 (void) zap_cursor_advance(&zc)) { 2029 ASSERT3U(attr->za_integer_length, ==, 2030 sizeof (uint64_t)); 2031 ASSERT3U(attr->za_num_integers, ==, 1); 2032 2033 err = dsl_dataset_hold_obj(dp, 2034 attr->za_first_integer, FTAG, &ds); 2035 if (err != 0) 2036 break; 2037 err = dcp->dc_func(dp, ds, dcp->dc_arg); 2038 dsl_dataset_rele(ds, FTAG); 2039 if (err != 0) 2040 break; 2041 } 2042 zap_cursor_fini(&zc); 2043 } 2044 } 2045 2046 kmem_free(attr, sizeof (zap_attribute_t)); 2047 2048 if (err != 0) { 2049 dsl_dir_rele(dd, FTAG); 2050 goto out; 2051 } 2052 2053 /* 2054 * Apply to self. 2055 */ 2056 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); 2057 2058 /* 2059 * Note: we hold the dir while calling dsl_dataset_hold_obj() so 2060 * that the dir will remain cached, and we won't have to re-instantiate 2061 * it (which could be expensive due to finding its name via 2062 * zap_value_search()). 2063 */ 2064 dsl_dir_rele(dd, FTAG); 2065 if (err != 0) 2066 goto out; 2067 err = dcp->dc_func(dp, ds, dcp->dc_arg); 2068 dsl_dataset_rele(ds, FTAG); 2069 2070 out: 2071 if (err != 0) { 2072 mutex_enter(dcp->dc_error_lock); 2073 /* only keep first error */ 2074 if (*dcp->dc_error == 0) 2075 *dcp->dc_error = err; 2076 mutex_exit(dcp->dc_error_lock); 2077 } 2078 2079 if (dcp->dc_ddname != NULL) 2080 spa_strfree(dcp->dc_ddname); 2081 kmem_free(dcp, sizeof (*dcp)); 2082 } 2083 2084 static void 2085 dmu_objset_find_dp_cb(void *arg) 2086 { 2087 dmu_objset_find_ctx_t *dcp = arg; 2088 dsl_pool_t *dp = dcp->dc_dp; 2089 2090 /* 2091 * We need to get a pool_config_lock here, as there are several 2092 * asssert(pool_config_held) down the stack. Getting a lock via 2093 * dsl_pool_config_enter is risky, as it might be stalled by a 2094 * pending writer. This would deadlock, as the write lock can 2095 * only be granted when our parent thread gives up the lock. 
static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * assert(pool_config_held) calls down the stack. Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer. This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order of the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	int error = 0;
	taskq_t *tq = NULL;
	int ntasks;
	dmu_objset_find_ctx_t *dcp;
	kmutex_t err_lock;

	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
	dcp->dc_tq = NULL;
	dcp->dc_dp = dp;
	dcp->dc_ddobj = ddobj;
	dcp->dc_ddname = NULL;
	dcp->dc_func = func;
	dcp->dc_arg = arg;
	dcp->dc_flags = flags;
	dcp->dc_error_lock = &err_lock;
	dcp->dc_error = &error;

	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * If a write lock is held we can't make use of
		 * parallelism, because down the stack of the worker threads
		 * the lock is asserted via dsl_pool_config_held.
		 * In case of a read lock this is solved by getting a read
		 * lock in each worker thread, which isn't possible in case
		 * of a writer lock. So we fall back to the synchronous path
		 * here.
		 * In the future it might be possible to get some magic into
		 * dsl_pool_config_held in a way that it returns true for
		 * the worker threads so that a single lock held from this
		 * thread suffices. For now, stay single threaded.
		 */
		dmu_objset_find_dp_impl(dcp);
		mutex_destroy(&err_lock);

		return (error);
	}

	ntasks = dmu_find_threads;
	if (ntasks == 0)
		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
	tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
	    INT_MAX, 0);
	if (tq == NULL) {
		kmem_free(dcp, sizeof (*dcp));
		mutex_destroy(&err_lock);

		return (SET_ERROR(ENOMEM));
	}
	dcp->dc_tq = tq;

	/* dcp will be freed by task */
	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active. As
	 * we always queue new tasks from within other tasks, taskq_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
	taskq_wait(tq);
	taskq_destroy(tq);
	mutex_destroy(&err_lock);

	return (error);
}
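/*
 * Illustrative usage sketch (not part of the original source): a callback
 * for dmu_objset_find_dp() that counts the datasets below a dsl_dir.  The
 * callback name and counter are hypothetical; ddobj is the object number of
 * the dsl_dir at which to start.  Because the traversal may run from
 * several taskq threads at once, the callback must either be thread-safe
 * (as sketched here with an atomic counter) or the caller must pass
 * DS_FIND_SERIALIZE.  The callback runs with the pool config lock held; a
 * nonzero return is remembered, further work is skipped, and the first
 * such error is returned to the caller.
 *
 *	static int
 *	count_ds_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		atomic_inc_64((uint64_t *)arg);
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = dmu_objset_find_dp(dp, ddobj, count_ds_cb, &count,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */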
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}
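/*
 * Illustrative usage sketch (not part of the original source): the user
 * pointer is how a higher-level consumer (for example a mounted ZPL
 * filesystem) attaches its own state to an objset.  The structure name
 * below is hypothetical; what matters is that os_user_ptr_lock is held
 * around both the set and the get, as the ASSERTs above require:
 *
 *	mutex_enter(&os->os_user_ptr_lock);
 *	dmu_objset_set_user(os, my_state);
 *	mutex_exit(&os->os_user_ptr_lock);
 *
 *	mutex_enter(&os->os_user_ptr_lock);
 *	my_state = dmu_objset_get_user(os);
 *	mutex_exit(&os->os_user_ptr_lock);
 */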
/*
 * Determine the name of the filesystem, given the name of a snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes.
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}

/*
 * Call when we think we're going to write/free space in open context to track
 * the amount of dirty data in the open txg, which is also the amount of
 * memory that cannot be evicted until this txg syncs.
 */
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);

	if (ds != NULL) {
		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
		dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
	}
}
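/*
 * Illustrative usage sketch for dmu_fsname() above (not part of the
 * original source): given a full snapshot name it yields the filesystem
 * component, so "tank/home@yesterday" produces "tank/home".  The buffer
 * name is hypothetical:
 *
 *	char fsname[ZFS_MAX_DATASET_NAME_LEN];
 *
 *	if (dmu_fsname("tank/home@yesterday", fsname) == 0) {
 *		... fsname now holds "tank/home" ...
 *	}
 */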