/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>

kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count", KSTAT_DATA_UINT64 },
	{ "cache_size_bytes", KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
	{ "cache_target_bytes", KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
	{ "cache_total_evicts", KSTAT_DATA_UINT64 },
	{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
	{ "hash_hits", KSTAT_DATA_UINT64 },
	{ "hash_misses", KSTAT_DATA_UINT64 },
	{ "hash_collisions", KSTAT_DATA_UINT64 },
	{ "hash_elements", KSTAT_DATA_UINT64 },
	{ "hash_elements_max", KSTAT_DATA_UINT64 },
	{ "hash_chains", KSTAT_DATA_UINT64 },
	{ "hash_chain_max", KSTAT_DATA_UINT64 },
	{ "hash_insert_race", KSTAT_DATA_UINT64 },
	{ "metadata_cache_count", KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)	\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {	\
	uint64_t _m;	\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&	\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;	\
}

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
unsigned long dbuf_cache_max_bytes = ULONG_MAX;
unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                        thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
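
/*
 * An illustrative worked example of the water marks described above: with a
 * cache target of 100 MiB and the default dbuf_cache_hiwater_pct and
 * dbuf_cache_lowater_pct of 10, the eviction thread operates between 90 MiB
 * (low water) and 110 MiB (high water). It is signaled once the size exceeds
 * the 100 MiB target, evicts down to 90 MiB, and above 110 MiB callers begin
 * evicting directly from their own context.
 */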

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq.  The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu.  In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}
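
/*
 * An illustrative example of the default shifts above: with an ARC target of
 * 8 GiB, dbuf_cache_shift = 5 gives a dbuf cache target of 1/32 of the ARC
 * (256 MiB), and dbuf_metadata_cache_shift = 6 gives a metadata cache target
 * of 1/64 (128 MiB), unless capped by dbuf_cache_max_bytes or
 * dbuf_metadata_cache_max_bytes respectively.
 */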

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;
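
	/*
	 * For example, with 4 GiB of physical memory and the default
	 * zfs_arc_average_blocksize of 8K, the loop above settles on
	 * hsize = 2^19 entries, i.e. 4 MiB of hash table (512K eight-byte
	 * pointers), matching the "1MB per GB" estimate above.
	 */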

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf_from_arcbuf(dmu_buf_impl_t *db, arc_buf_t *data)
{
	objset_t *os = db->db_objset;
	spa_t *spa = os->os_spa;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	enum zio_compress compress_type;
	uint8_t complevel;
	int psize, lsize;

	psize = arc_buf_size(data);
	lsize = arc_buf_lsize(data);
	compress_type = arc_get_compression(data);
	complevel = arc_get_complevel(data);

	if (arc_is_encrypted(data)) {
		boolean_t byteorder;
		uint8_t salt[ZIO_DATA_SALT_LEN];
		uint8_t iv[ZIO_DATA_IV_LEN];
		uint8_t mac[ZIO_DATA_MAC_LEN];
		dnode_t *dn = DB_DNODE(db);

		arc_get_raw_params(data, &byteorder, salt, iv, mac);
		data = arc_alloc_raw_buf(spa, db, dmu_objset_id(os),
		    byteorder, salt, iv, mac, dn->dn_type, psize, lsize,
		    compress_type, complevel);
	} else if (compress_type != ZIO_COMPRESS_OFF) {
		ASSERT3U(type, ==, ARC_BUFC_DATA);
		data = arc_alloc_compressed_buf(spa, db,
		    psize, lsize, compress_type, complevel);
	} else {
		data = arc_alloc_buf(spa, db, type, psize);
	}
	return (data);
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
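
/*
 * An illustrative worked example of the math above: with 128 KiB data blocks
 * (dn_datablkshift = 17) and 128 KiB indirect blocks (dn_indblkshift = 17),
 * each indirect block holds 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers,
 * so the level-1 blkid for an offset is offset >> (17 + 1 * 10); one level-1
 * block then covers 128 MiB of file data.
 */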

/*
 * This function is used to lock the parent of the provided dbuf. This should
 * be used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "i/o error");
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* freed in flight */
		ASSERT(zio == NULL || zio->io_error == 0);
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "freed in flight");
	} else {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "successful read");
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}

/*
 * Shortcut for performing reads on bonus dbufs.  Returns
 * an error if we fail to verify the dnode associated with
 * a decrypted block. Otherwise success.
 */
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	int bonuslen, max_bonuslen, err;

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err)
		return (err);

	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(DB_DNODE_HELD(db));
	ASSERT3U(bonuslen, <=, db->db.db_size);
	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
	if (bonuslen < max_bonuslen)
		bzero(db->db.db_data, max_bonuslen);
	if (bonuslen)
		bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
	db->db_state = DB_CACHED;
	DTRACE_SET_STATE(db, "bonus buffer filled");
	return (0);
}

static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
	blkptr_t *bps = db->db.db_data;
	uint32_t indbs = 1ULL << dn->dn_indblkshift;
	int n_bps = indbs >> SPA_BLKPTRSHIFT;

	for (int i = 0; i < n_bps; i++) {
		blkptr_t *bp = &bps[i];

		ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
		BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
		    dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
		BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
		BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
		BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
	}
}

/*
 * Handle reads on dbufs that are holes, if necessary.  This function
 * requires that the dbuf's mutex is held. Returns success (0) if action
 * was taken, ENOENT if no action was taken.
 */
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
	/*
	 * For level 0 blocks only, if the above check fails:
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (!is_hole && db->db_level == 0) {
		is_hole = dnode_block_freed(dn, db->db_blkid) ||
		    BP_IS_HOLE(db->db_blkptr);
	}

	if (is_hole) {
		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			dbuf_handle_indirect_hole(db, dn);
		}
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "hole read satisfied");
		return (0);
	}
	return (ENOENT);
}

/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!os->os_encrypted || os->os_raw_receive ||
	    (flags & DB_RF_NO_DECRYPT) != 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Drops db_mtx and the parent lock specified by dblt and tag before
 * returning.
 */
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
    db_lock_type_t dblt, void *tag)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags;

	err = zio_flags = 0;
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_parent == NULL ||
	    RW_LOCK_HELD(&db->db_parent->db_rwlock));

	if (db->db_blkid == DMU_BONUS_BLKID) {
		err = dbuf_read_bonus(db, dn, flags);
		goto early_unlock;
	}

	err = dbuf_read_hole(db, dn, flags);
	if (err == 0)
		goto early_unlock;

	/*
	 * Any attempt to read a redacted block should result in an error. This
	 * will never happen under normal conditions, but can be useful for
	 * debugging purposes.
	 */
	if (BP_IS_REDACTED(db->db_blkptr)) {
		ASSERT(dsl_dataset_feature_is_active(
		    db->db_objset->os_dsl_dataset,
		    SPA_FEATURE_REDACTED_DATASETS));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0)
		goto early_unlock;

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	DTRACE_SET_STATE(db, "read issued");
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we need to
	 * do this now so that we can release the parent's rwlock. We have to
	 * do that now so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	blkptr_t bp = *db->db_blkptr;
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, &bp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
early_unlock:
	DB_DNODE_EXIT(db);
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		arc_buf_t *buf = dbuf_alloc_arcbuf_from_arcbuf(db, db->db_buf);
		dr->dt.dl.dr_data = buf;
		bcopy(db->db.db_data, buf->b_data, arc_buf_size(buf));
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		/*
		 * Ensure that this block's dnode has been decrypted if
		 * the caller has requested decrypted data.
		 */
		err = dbuf_read_verify_dnode_crypt(db, flags);

		/*
		 * If the arc buf is compressed or encrypted and the caller
		 * requested uncompressed data, we need to untransform it
		 * before returning. We also call arc_untransform() on any
		 * unauthenticated blocks, which will verify their MAC if
		 * the key is now available.
		 */
		if (err == 0 && db->db_buf != NULL &&
		    (flags & DB_RF_NO_DECRYPT) == 0 &&
		    (arc_is_encrypted(db->db_buf) ||
		    arc_is_unauthenticated(db->db_buf) ||
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
			zbookmark_phys_t zb;

			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
			    db->db.db_object, db->db_level, db->db_blkid);
			dbuf_fix_old_data(db, spa_syncing_txg(spa));
			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (err == 0 && prefetch) {
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
			    B_FALSE, flags & DB_RF_HAVESTRUCT);
		}
		DB_DNODE_EXIT(db);
		DBUF_STAT_BUMP(hash_hits);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;
		boolean_t need_wait = B_FALSE;

		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			need_wait = B_TRUE;
		}
		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
		/*
		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
		 * for us
		 */
		if (!err && prefetch) {
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
			    db->db_state != DB_CACHED,
			    flags & DB_RF_HAVESTRUCT);
		}

		DB_DNODE_EXIT(db);
		DBUF_STAT_BUMP(hash_misses);

		/*
		 * If we created a zio_root we must execute it to avoid
		 * leaking it, even if it isn't attached to any work due
		 * to an error in dbuf_read_impl().
		 */
		if (need_wait) {
			if (err == 0)
				err = zio_wait(zio);
			else
				VERIFY0(zio_wait(zio));
		}
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch) {
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
			    B_TRUE, flags & DB_RF_HAVESTRUCT);
		}
		DB_DNODE_EXIT(db);
		DBUF_STAT_BUMP(hash_misses);

		/* Skip the wait per the caller's request. */
*/ 1761 if ((flags & DB_RF_NEVERWAIT) == 0) { 1762 mutex_enter(&db->db_mtx); 1763 while (db->db_state == DB_READ || 1764 db->db_state == DB_FILL) { 1765 ASSERT(db->db_state == DB_READ || 1766 (flags & DB_RF_HAVESTRUCT) == 0); 1767 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1768 db, zio_t *, zio); 1769 cv_wait(&db->db_changed, &db->db_mtx); 1770 } 1771 if (db->db_state == DB_UNCACHED) 1772 err = SET_ERROR(EIO); 1773 mutex_exit(&db->db_mtx); 1774 } 1775 } 1776 1777 return (err); 1778 } 1779 1780 static void 1781 dbuf_noread(dmu_buf_impl_t *db) 1782 { 1783 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1784 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1785 mutex_enter(&db->db_mtx); 1786 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1787 cv_wait(&db->db_changed, &db->db_mtx); 1788 if (db->db_state == DB_UNCACHED) { 1789 ASSERT(db->db_buf == NULL); 1790 ASSERT(db->db.db_data == NULL); 1791 dbuf_set_data(db, dbuf_alloc_arcbuf(db)); 1792 db->db_state = DB_FILL; 1793 DTRACE_SET_STATE(db, "assigning filled buffer"); 1794 } else if (db->db_state == DB_NOFILL) { 1795 dbuf_clear_data(db); 1796 } else { 1797 ASSERT3U(db->db_state, ==, DB_CACHED); 1798 } 1799 mutex_exit(&db->db_mtx); 1800 } 1801 1802 void 1803 dbuf_unoverride(dbuf_dirty_record_t *dr) 1804 { 1805 dmu_buf_impl_t *db = dr->dr_dbuf; 1806 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1807 uint64_t txg = dr->dr_txg; 1808 1809 ASSERT(MUTEX_HELD(&db->db_mtx)); 1810 /* 1811 * This assert is valid because dmu_sync() expects to be called by 1812 * a zilog's get_data while holding a range lock. This call only 1813 * comes from dbuf_dirty() callers who must also hold a range lock. 1814 */ 1815 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1816 ASSERT(db->db_level == 0); 1817 1818 if (db->db_blkid == DMU_BONUS_BLKID || 1819 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1820 return; 1821 1822 ASSERT(db->db_data_pending != dr); 1823 1824 /* free this block */ 1825 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 1826 zio_free(db->db_objset->os_spa, txg, bp); 1827 1828 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1829 dr->dt.dl.dr_nopwrite = B_FALSE; 1830 dr->dt.dl.dr_has_raw_params = B_FALSE; 1831 1832 /* 1833 * Release the already-written buffer, so we leave it in 1834 * a consistent dirty state. Note that all callers are 1835 * modifying the buffer, so they will immediately do 1836 * another (redundant) arc_release(). Therefore, leave 1837 * the buf thawed to save the effort of freezing & 1838 * immediately re-thawing it. 1839 */ 1840 arc_release(dr->dt.dl.dr_data, db); 1841 } 1842 1843 /* 1844 * Evict (if its unreferenced) or clear (if its referenced) any level-0 1845 * data blocks in the free range, so that any future readers will find 1846 * empty blocks. 
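 *
 * For example, dbuf_rm_spill() below frees the spill block by passing the
 * one-block range [DMU_SPILL_BLKID, DMU_SPILL_BLKID].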
1847 */ 1848 void 1849 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 1850 dmu_tx_t *tx) 1851 { 1852 dmu_buf_impl_t *db_search; 1853 dmu_buf_impl_t *db, *db_next; 1854 uint64_t txg = tx->tx_txg; 1855 avl_index_t where; 1856 dbuf_dirty_record_t *dr; 1857 1858 if (end_blkid > dn->dn_maxblkid && 1859 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 1860 end_blkid = dn->dn_maxblkid; 1861 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 1862 1863 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); 1864 db_search->db_level = 0; 1865 db_search->db_blkid = start_blkid; 1866 db_search->db_state = DB_SEARCH; 1867 1868 mutex_enter(&dn->dn_dbufs_mtx); 1869 db = avl_find(&dn->dn_dbufs, db_search, &where); 1870 ASSERT3P(db, ==, NULL); 1871 1872 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 1873 1874 for (; db != NULL; db = db_next) { 1875 db_next = AVL_NEXT(&dn->dn_dbufs, db); 1876 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1877 1878 if (db->db_level != 0 || db->db_blkid > end_blkid) { 1879 break; 1880 } 1881 ASSERT3U(db->db_blkid, >=, start_blkid); 1882 1883 /* found a level 0 buffer in the range */ 1884 mutex_enter(&db->db_mtx); 1885 if (dbuf_undirty(db, tx)) { 1886 /* mutex has been dropped and dbuf destroyed */ 1887 continue; 1888 } 1889 1890 if (db->db_state == DB_UNCACHED || 1891 db->db_state == DB_NOFILL || 1892 db->db_state == DB_EVICTING) { 1893 ASSERT(db->db.db_data == NULL); 1894 mutex_exit(&db->db_mtx); 1895 continue; 1896 } 1897 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1898 /* will be handled in dbuf_read_done or dbuf_rele */ 1899 db->db_freed_in_flight = TRUE; 1900 mutex_exit(&db->db_mtx); 1901 continue; 1902 } 1903 if (zfs_refcount_count(&db->db_holds) == 0) { 1904 ASSERT(db->db_buf); 1905 dbuf_destroy(db); 1906 continue; 1907 } 1908 /* The dbuf is referenced */ 1909 1910 dr = list_head(&db->db_dirty_records); 1911 if (dr != NULL) { 1912 if (dr->dr_txg == txg) { 1913 /* 1914 * This buffer is "in-use", re-adjust the file 1915 * size to reflect that this buffer may 1916 * contain new data when we sync. 1917 */ 1918 if (db->db_blkid != DMU_SPILL_BLKID && 1919 db->db_blkid > dn->dn_maxblkid) 1920 dn->dn_maxblkid = db->db_blkid; 1921 dbuf_unoverride(dr); 1922 } else { 1923 /* 1924 * This dbuf is not dirty in the open context. 1925 * Either uncache it (if its not referenced in 1926 * the open context) or reset its contents to 1927 * empty. 
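 * If needed, dbuf_fix_old_data() below gives that older dirty record its
 * own copy of the data (or simply detaches db_buf when there are no extra
 * holds); anything still DB_CACHED afterwards is zeroed out under
 * db_rwlock.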
1928 */ 1929 dbuf_fix_old_data(db, txg); 1930 } 1931 } 1932 /* clear the contents if its cached */ 1933 if (db->db_state == DB_CACHED) { 1934 ASSERT(db->db.db_data != NULL); 1935 arc_release(db->db_buf, db); 1936 rw_enter(&db->db_rwlock, RW_WRITER); 1937 bzero(db->db.db_data, db->db.db_size); 1938 rw_exit(&db->db_rwlock); 1939 arc_buf_freeze(db->db_buf); 1940 } 1941 1942 mutex_exit(&db->db_mtx); 1943 } 1944 1945 kmem_free(db_search, sizeof (dmu_buf_impl_t)); 1946 mutex_exit(&dn->dn_dbufs_mtx); 1947 } 1948 1949 void 1950 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1951 { 1952 arc_buf_t *buf, *old_buf; 1953 dbuf_dirty_record_t *dr; 1954 int osize = db->db.db_size; 1955 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1956 dnode_t *dn; 1957 1958 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1959 1960 DB_DNODE_ENTER(db); 1961 dn = DB_DNODE(db); 1962 1963 /* 1964 * XXX we should be doing a dbuf_read, checking the return 1965 * value and returning that up to our callers 1966 */ 1967 dmu_buf_will_dirty(&db->db, tx); 1968 1969 /* create the data buffer for the new block */ 1970 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1971 1972 /* copy old block data to the new block */ 1973 old_buf = db->db_buf; 1974 bcopy(old_buf->b_data, buf->b_data, MIN(osize, size)); 1975 /* zero the remainder */ 1976 if (size > osize) 1977 bzero((uint8_t *)buf->b_data + osize, size - osize); 1978 1979 mutex_enter(&db->db_mtx); 1980 dbuf_set_data(db, buf); 1981 arc_buf_destroy(old_buf, db); 1982 db->db.db_size = size; 1983 1984 dr = list_head(&db->db_dirty_records); 1985 /* dirty record added by dmu_buf_will_dirty() */ 1986 VERIFY(dr != NULL); 1987 if (db->db_level == 0) 1988 dr->dt.dl.dr_data = buf; 1989 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 1990 ASSERT3U(dr->dr_accounted, ==, osize); 1991 dr->dr_accounted = size; 1992 mutex_exit(&db->db_mtx); 1993 1994 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1995 DB_DNODE_EXIT(db); 1996 } 1997 1998 void 1999 dbuf_release_bp(dmu_buf_impl_t *db) 2000 { 2001 objset_t *os __maybe_unused = db->db_objset; 2002 2003 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 2004 ASSERT(arc_released(os->os_phys_buf) || 2005 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 2006 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 2007 2008 (void) arc_release(db->db_buf, db); 2009 } 2010 2011 /* 2012 * We already have a dirty record for this TXG, and we are being 2013 * dirtied again. 2014 */ 2015 static void 2016 dbuf_redirty(dbuf_dirty_record_t *dr) 2017 { 2018 dmu_buf_impl_t *db = dr->dr_dbuf; 2019 2020 ASSERT(MUTEX_HELD(&db->db_mtx)); 2021 2022 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 2023 /* 2024 * If this buffer has already been written out, 2025 * we now need to reset its state. 2026 */ 2027 dbuf_unoverride(dr); 2028 if (db->db.db_object != DMU_META_DNODE_OBJECT && 2029 db->db_state != DB_NOFILL) { 2030 /* Already released on initial dirty, so just thaw. 
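 * Thawing allows the contents to be modified again without tripping the
 * ZFS_DEBUG_MODIFY checksum verification on the frozen buffer.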
*/ 2031 ASSERT(arc_released(db->db_buf)); 2032 arc_buf_thaw(db->db_buf); 2033 } 2034 } 2035 } 2036 2037 dbuf_dirty_record_t * 2038 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) 2039 { 2040 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2041 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid); 2042 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE); 2043 ASSERT(dn->dn_maxblkid >= blkid); 2044 2045 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP); 2046 list_link_init(&dr->dr_dirty_node); 2047 list_link_init(&dr->dr_dbuf_node); 2048 dr->dr_dnode = dn; 2049 dr->dr_txg = tx->tx_txg; 2050 dr->dt.dll.dr_blkid = blkid; 2051 dr->dr_accounted = dn->dn_datablksz; 2052 2053 /* 2054 * There should not be any dbuf for the block that we're dirtying. 2055 * Otherwise the buffer contents could be inconsistent between the 2056 * dbuf and the lightweight dirty record. 2057 */ 2058 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid)); 2059 2060 mutex_enter(&dn->dn_mtx); 2061 int txgoff = tx->tx_txg & TXG_MASK; 2062 if (dn->dn_free_ranges[txgoff] != NULL) { 2063 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1); 2064 } 2065 2066 if (dn->dn_nlevels == 1) { 2067 ASSERT3U(blkid, <, dn->dn_nblkptr); 2068 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2069 mutex_exit(&dn->dn_mtx); 2070 rw_exit(&dn->dn_struct_rwlock); 2071 dnode_setdirty(dn, tx); 2072 } else { 2073 mutex_exit(&dn->dn_mtx); 2074 2075 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2076 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn, 2077 1, blkid >> epbs, FTAG); 2078 rw_exit(&dn->dn_struct_rwlock); 2079 if (parent_db == NULL) { 2080 kmem_free(dr, sizeof (*dr)); 2081 return (NULL); 2082 } 2083 int err = dbuf_read(parent_db, NULL, 2084 (DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2085 if (err != 0) { 2086 dbuf_rele(parent_db, FTAG); 2087 kmem_free(dr, sizeof (*dr)); 2088 return (NULL); 2089 } 2090 2091 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx); 2092 dbuf_rele(parent_db, FTAG); 2093 mutex_enter(&parent_dr->dt.di.dr_mtx); 2094 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg); 2095 list_insert_tail(&parent_dr->dt.di.dr_children, dr); 2096 mutex_exit(&parent_dr->dt.di.dr_mtx); 2097 dr->dr_parent = parent_dr; 2098 } 2099 2100 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx); 2101 2102 return (dr); 2103 } 2104 2105 dbuf_dirty_record_t * 2106 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2107 { 2108 dnode_t *dn; 2109 objset_t *os; 2110 dbuf_dirty_record_t *dr, *dr_next, *dr_head; 2111 int txgoff = tx->tx_txg & TXG_MASK; 2112 boolean_t drop_struct_rwlock = B_FALSE; 2113 2114 ASSERT(tx->tx_txg != 0); 2115 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2116 DMU_TX_DIRTY_BUF(tx, db); 2117 2118 DB_DNODE_ENTER(db); 2119 dn = DB_DNODE(db); 2120 /* 2121 * Shouldn't dirty a regular buffer in syncing context. Private 2122 * objects may be dirtied in syncing context, but only if they 2123 * were already pre-dirtied in open context. 
2124 */ 2125 #ifdef ZFS_DEBUG 2126 if (dn->dn_objset->os_dsl_dataset != NULL) { 2127 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 2128 RW_READER, FTAG); 2129 } 2130 ASSERT(!dmu_tx_is_syncing(tx) || 2131 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 2132 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2133 dn->dn_objset->os_dsl_dataset == NULL); 2134 if (dn->dn_objset->os_dsl_dataset != NULL) 2135 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 2136 #endif 2137 /* 2138 * We make this assert for private objects as well, but after we 2139 * check if we're already dirty. They are allowed to re-dirty 2140 * in syncing context. 2141 */ 2142 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2143 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2144 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2145 2146 mutex_enter(&db->db_mtx); 2147 /* 2148 * XXX make this true for indirects too? The problem is that 2149 * transactions created with dmu_tx_create_assigned() from 2150 * syncing context don't bother holding ahead. 2151 */ 2152 ASSERT(db->db_level != 0 || 2153 db->db_state == DB_CACHED || db->db_state == DB_FILL || 2154 db->db_state == DB_NOFILL); 2155 2156 mutex_enter(&dn->dn_mtx); 2157 dnode_set_dirtyctx(dn, tx, db); 2158 if (tx->tx_txg > dn->dn_dirty_txg) 2159 dn->dn_dirty_txg = tx->tx_txg; 2160 mutex_exit(&dn->dn_mtx); 2161 2162 if (db->db_blkid == DMU_SPILL_BLKID) 2163 dn->dn_have_spill = B_TRUE; 2164 2165 /* 2166 * If this buffer is already dirty, we're done. 2167 */ 2168 dr_head = list_head(&db->db_dirty_records); 2169 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg || 2170 db->db.db_object == DMU_META_DNODE_OBJECT); 2171 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg); 2172 if (dr_next && dr_next->dr_txg == tx->tx_txg) { 2173 DB_DNODE_EXIT(db); 2174 2175 dbuf_redirty(dr_next); 2176 mutex_exit(&db->db_mtx); 2177 return (dr_next); 2178 } 2179 2180 /* 2181 * Only valid if not already dirty. 2182 */ 2183 ASSERT(dn->dn_object == 0 || 2184 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2185 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2186 2187 ASSERT3U(dn->dn_nlevels, >, db->db_level); 2188 2189 /* 2190 * We should only be dirtying in syncing context if it's the 2191 * mos or we're initializing the os or it's a special object. 2192 * However, we are allowed to dirty in syncing context provided 2193 * we already dirtied it in open context. Hence we must make 2194 * this assertion only if we're not already dirty. 2195 */ 2196 os = dn->dn_objset; 2197 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 2198 #ifdef ZFS_DEBUG 2199 if (dn->dn_objset->os_dsl_dataset != NULL) 2200 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 2201 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2202 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 2203 if (dn->dn_objset->os_dsl_dataset != NULL) 2204 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 2205 #endif 2206 ASSERT(db->db.db_size != 0); 2207 2208 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2209 2210 if (db->db_blkid != DMU_BONUS_BLKID) { 2211 dmu_objset_willuse_space(os, db->db.db_size, tx); 2212 } 2213 2214 /* 2215 * If this buffer is dirty in an old transaction group we need 2216 * to make a copy of it so that the changes we make in this 2217 * transaction group won't leak out when we sync the older txg. 
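 * For example, if this dbuf was dirtied in txg N, which is still syncing,
 * and is now being dirtied again in txg N+1, dbuf_fix_old_data() below
 * gives the txg N dirty record its own copy of the data so that
 * modifications made through db_buf in txg N+1 are not written out by
 * txg N.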
2218 */ 2219 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 2220 list_link_init(&dr->dr_dirty_node); 2221 list_link_init(&dr->dr_dbuf_node); 2222 dr->dr_dnode = dn; 2223 if (db->db_level == 0) { 2224 void *data_old = db->db_buf; 2225 2226 if (db->db_state != DB_NOFILL) { 2227 if (db->db_blkid == DMU_BONUS_BLKID) { 2228 dbuf_fix_old_data(db, tx->tx_txg); 2229 data_old = db->db.db_data; 2230 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 2231 /* 2232 * Release the data buffer from the cache so 2233 * that we can modify it without impacting 2234 * possible other users of this cached data 2235 * block. Note that indirect blocks and 2236 * private objects are not released until the 2237 * syncing state (since they are only modified 2238 * then). 2239 */ 2240 arc_release(db->db_buf, db); 2241 dbuf_fix_old_data(db, tx->tx_txg); 2242 data_old = db->db_buf; 2243 } 2244 ASSERT(data_old != NULL); 2245 } 2246 dr->dt.dl.dr_data = data_old; 2247 } else { 2248 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); 2249 list_create(&dr->dt.di.dr_children, 2250 sizeof (dbuf_dirty_record_t), 2251 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 2252 } 2253 if (db->db_blkid != DMU_BONUS_BLKID) 2254 dr->dr_accounted = db->db.db_size; 2255 dr->dr_dbuf = db; 2256 dr->dr_txg = tx->tx_txg; 2257 list_insert_before(&db->db_dirty_records, dr_next, dr); 2258 2259 /* 2260 * We could have been freed_in_flight between the dbuf_noread 2261 * and dbuf_dirty. We win, as though the dbuf_noread() had 2262 * happened after the free. 2263 */ 2264 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2265 db->db_blkid != DMU_SPILL_BLKID) { 2266 mutex_enter(&dn->dn_mtx); 2267 if (dn->dn_free_ranges[txgoff] != NULL) { 2268 range_tree_clear(dn->dn_free_ranges[txgoff], 2269 db->db_blkid, 1); 2270 } 2271 mutex_exit(&dn->dn_mtx); 2272 db->db_freed_in_flight = FALSE; 2273 } 2274 2275 /* 2276 * This buffer is now part of this txg 2277 */ 2278 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 2279 db->db_dirtycnt += 1; 2280 ASSERT3U(db->db_dirtycnt, <=, 3); 2281 2282 mutex_exit(&db->db_mtx); 2283 2284 if (db->db_blkid == DMU_BONUS_BLKID || 2285 db->db_blkid == DMU_SPILL_BLKID) { 2286 mutex_enter(&dn->dn_mtx); 2287 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2288 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2289 mutex_exit(&dn->dn_mtx); 2290 dnode_setdirty(dn, tx); 2291 DB_DNODE_EXIT(db); 2292 return (dr); 2293 } 2294 2295 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 2296 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2297 drop_struct_rwlock = B_TRUE; 2298 } 2299 2300 /* 2301 * If we are overwriting a dedup BP, then unless it is snapshotted, 2302 * when we get to syncing context we will need to decrement its 2303 * refcount in the DDT. Prefetch the relevant DDT block so that 2304 * syncing context won't have to wait for the i/o. 2305 */ 2306 if (db->db_blkptr != NULL) { 2307 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 2308 ddt_prefetch(os->os_spa, db->db_blkptr); 2309 dmu_buf_unlock_parent(db, dblt, FTAG); 2310 } 2311 2312 /* 2313 * We need to hold the dn_struct_rwlock to make this assertion, 2314 * because it protects dn_phys / dn_next_nlevels from changing. 
2315 */ 2316 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 2317 dn->dn_phys->dn_nlevels > db->db_level || 2318 dn->dn_next_nlevels[txgoff] > db->db_level || 2319 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 2320 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 2321 2322 2323 if (db->db_level == 0) { 2324 ASSERT(!db->db_objset->os_raw_receive || 2325 dn->dn_maxblkid >= db->db_blkid); 2326 dnode_new_blkid(dn, db->db_blkid, tx, 2327 drop_struct_rwlock, B_FALSE); 2328 ASSERT(dn->dn_maxblkid >= db->db_blkid); 2329 } 2330 2331 if (db->db_level+1 < dn->dn_nlevels) { 2332 dmu_buf_impl_t *parent = db->db_parent; 2333 dbuf_dirty_record_t *di; 2334 int parent_held = FALSE; 2335 2336 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 2337 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2338 parent = dbuf_hold_level(dn, db->db_level + 1, 2339 db->db_blkid >> epbs, FTAG); 2340 ASSERT(parent != NULL); 2341 parent_held = TRUE; 2342 } 2343 if (drop_struct_rwlock) 2344 rw_exit(&dn->dn_struct_rwlock); 2345 ASSERT3U(db->db_level + 1, ==, parent->db_level); 2346 di = dbuf_dirty(parent, tx); 2347 if (parent_held) 2348 dbuf_rele(parent, FTAG); 2349 2350 mutex_enter(&db->db_mtx); 2351 /* 2352 * Since we've dropped the mutex, it's possible that 2353 * dbuf_undirty() might have changed this out from under us. 2354 */ 2355 if (list_head(&db->db_dirty_records) == dr || 2356 dn->dn_object == DMU_META_DNODE_OBJECT) { 2357 mutex_enter(&di->dt.di.dr_mtx); 2358 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 2359 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2360 list_insert_tail(&di->dt.di.dr_children, dr); 2361 mutex_exit(&di->dt.di.dr_mtx); 2362 dr->dr_parent = di; 2363 } 2364 mutex_exit(&db->db_mtx); 2365 } else { 2366 ASSERT(db->db_level + 1 == dn->dn_nlevels); 2367 ASSERT(db->db_blkid < dn->dn_nblkptr); 2368 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 2369 mutex_enter(&dn->dn_mtx); 2370 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2371 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2372 mutex_exit(&dn->dn_mtx); 2373 if (drop_struct_rwlock) 2374 rw_exit(&dn->dn_struct_rwlock); 2375 } 2376 2377 dnode_setdirty(dn, tx); 2378 DB_DNODE_EXIT(db); 2379 return (dr); 2380 } 2381 2382 static void 2383 dbuf_undirty_bonus(dbuf_dirty_record_t *dr) 2384 { 2385 dmu_buf_impl_t *db = dr->dr_dbuf; 2386 2387 if (dr->dt.dl.dr_data != db->db.db_data) { 2388 struct dnode *dn = dr->dr_dnode; 2389 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 2390 2391 kmem_free(dr->dt.dl.dr_data, max_bonuslen); 2392 arc_space_return(max_bonuslen, ARC_SPACE_BONUS); 2393 } 2394 db->db_data_pending = NULL; 2395 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 2396 list_remove(&db->db_dirty_records, dr); 2397 if (dr->dr_dbuf->db_level != 0) { 2398 mutex_destroy(&dr->dt.di.dr_mtx); 2399 list_destroy(&dr->dt.di.dr_children); 2400 } 2401 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2402 ASSERT3U(db->db_dirtycnt, >, 0); 2403 db->db_dirtycnt -= 1; 2404 } 2405 2406 /* 2407 * Undirty a buffer in the transaction group referenced by the given 2408 * transaction. Return whether this evicted the dbuf. 2409 */ 2410 static boolean_t 2411 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2412 { 2413 uint64_t txg = tx->tx_txg; 2414 2415 ASSERT(txg != 0); 2416 2417 /* 2418 * Due to our use of dn_nlevels below, this can only be called 2419 * in open context, unless we are operating on the MOS. 
2420 * From syncing context, dn_nlevels may be different from the 2421 * dn_nlevels used when dbuf was dirtied. 2422 */ 2423 ASSERT(db->db_objset == 2424 dmu_objset_pool(db->db_objset)->dp_meta_objset || 2425 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 2426 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2427 ASSERT0(db->db_level); 2428 ASSERT(MUTEX_HELD(&db->db_mtx)); 2429 2430 /* 2431 * If this buffer is not dirty, we're done. 2432 */ 2433 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg); 2434 if (dr == NULL) 2435 return (B_FALSE); 2436 ASSERT(dr->dr_dbuf == db); 2437 2438 dnode_t *dn = dr->dr_dnode; 2439 2440 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2441 2442 ASSERT(db->db.db_size != 0); 2443 2444 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 2445 dr->dr_accounted, txg); 2446 2447 list_remove(&db->db_dirty_records, dr); 2448 2449 /* 2450 * Note that there are three places in dbuf_dirty() 2451 * where this dirty record may be put on a list. 2452 * Make sure to do a list_remove corresponding to 2453 * every one of those list_insert calls. 2454 */ 2455 if (dr->dr_parent) { 2456 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 2457 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 2458 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 2459 } else if (db->db_blkid == DMU_SPILL_BLKID || 2460 db->db_level + 1 == dn->dn_nlevels) { 2461 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 2462 mutex_enter(&dn->dn_mtx); 2463 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 2464 mutex_exit(&dn->dn_mtx); 2465 } 2466 2467 if (db->db_state != DB_NOFILL) { 2468 dbuf_unoverride(dr); 2469 2470 ASSERT(db->db_buf != NULL); 2471 ASSERT(dr->dt.dl.dr_data != NULL); 2472 if (dr->dt.dl.dr_data != db->db_buf) 2473 arc_buf_destroy(dr->dt.dl.dr_data, db); 2474 } 2475 2476 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2477 2478 ASSERT(db->db_dirtycnt > 0); 2479 db->db_dirtycnt -= 1; 2480 2481 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2482 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 2483 dbuf_destroy(db); 2484 return (B_TRUE); 2485 } 2486 2487 return (B_FALSE); 2488 } 2489 2490 static void 2491 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) 2492 { 2493 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2494 2495 ASSERT(tx->tx_txg != 0); 2496 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2497 2498 /* 2499 * Quick check for dirtiness. For already dirty blocks, this 2500 * reduces runtime of this function by >90%, and overall performance 2501 * by 50% for some workloads (e.g. file deletion with indirect blocks 2502 * cached). 2503 */ 2504 mutex_enter(&db->db_mtx); 2505 2506 if (db->db_state == DB_CACHED) { 2507 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2508 /* 2509 * It's possible that it is already dirty but not cached, 2510 * because there are some calls to dbuf_dirty() that don't 2511 * go through dmu_buf_will_dirty(). 2512 */ 2513 if (dr != NULL) { 2514 /* This dbuf is already dirty and cached. 
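 * No read is needed here; dbuf_redirty() just resets any override state
 * so the buffer can be modified again in this txg.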
*/ 2515 dbuf_redirty(dr); 2516 mutex_exit(&db->db_mtx); 2517 return; 2518 } 2519 } 2520 mutex_exit(&db->db_mtx); 2521 2522 DB_DNODE_ENTER(db); 2523 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 2524 flags |= DB_RF_HAVESTRUCT; 2525 DB_DNODE_EXIT(db); 2526 (void) dbuf_read(db, NULL, flags); 2527 (void) dbuf_dirty(db, tx); 2528 } 2529 2530 void 2531 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2532 { 2533 dmu_buf_will_dirty_impl(db_fake, 2534 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); 2535 } 2536 2537 boolean_t 2538 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2539 { 2540 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2541 dbuf_dirty_record_t *dr; 2542 2543 mutex_enter(&db->db_mtx); 2544 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2545 mutex_exit(&db->db_mtx); 2546 return (dr != NULL); 2547 } 2548 2549 void 2550 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2551 { 2552 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2553 2554 db->db_state = DB_NOFILL; 2555 DTRACE_SET_STATE(db, "allocating NOFILL buffer"); 2556 dmu_buf_will_fill(db_fake, tx); 2557 } 2558 2559 void 2560 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2561 { 2562 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2563 2564 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2565 ASSERT(tx->tx_txg != 0); 2566 ASSERT(db->db_level == 0); 2567 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2568 2569 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2570 dmu_tx_private_ok(tx)); 2571 2572 dbuf_noread(db); 2573 (void) dbuf_dirty(db, tx); 2574 } 2575 2576 /* 2577 * This function is effectively the same as dmu_buf_will_dirty(), but 2578 * indicates the caller expects raw encrypted data in the db, and provides 2579 * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2580 * blkptr_t when this dbuf is written. This is only used for blocks of 2581 * dnodes, during raw receive. 2582 */ 2583 void 2584 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2585 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2586 { 2587 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2588 dbuf_dirty_record_t *dr; 2589 2590 /* 2591 * dr_has_raw_params is only processed for blocks of dnodes 2592 * (see dbuf_sync_dnode_leaf_crypt()). 
2593 */ 2594 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2595 ASSERT3U(db->db_level, ==, 0); 2596 ASSERT(db->db_objset->os_raw_receive); 2597 2598 dmu_buf_will_dirty_impl(db_fake, 2599 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); 2600 2601 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2602 2603 ASSERT3P(dr, !=, NULL); 2604 2605 dr->dt.dl.dr_has_raw_params = B_TRUE; 2606 dr->dt.dl.dr_byteorder = byteorder; 2607 bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN); 2608 bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN); 2609 bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN); 2610 } 2611 2612 static void 2613 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) 2614 { 2615 struct dirty_leaf *dl; 2616 dbuf_dirty_record_t *dr; 2617 2618 dr = list_head(&db->db_dirty_records); 2619 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2620 dl = &dr->dt.dl; 2621 dl->dr_overridden_by = *bp; 2622 dl->dr_override_state = DR_OVERRIDDEN; 2623 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2624 } 2625 2626 /* ARGSUSED */ 2627 void 2628 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx) 2629 { 2630 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2631 dbuf_states_t old_state; 2632 mutex_enter(&db->db_mtx); 2633 DBUF_VERIFY(db); 2634 2635 old_state = db->db_state; 2636 db->db_state = DB_CACHED; 2637 if (old_state == DB_FILL) { 2638 if (db->db_level == 0 && db->db_freed_in_flight) { 2639 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2640 /* we were freed while filling */ 2641 /* XXX dbuf_undirty? */ 2642 bzero(db->db.db_data, db->db.db_size); 2643 db->db_freed_in_flight = FALSE; 2644 DTRACE_SET_STATE(db, 2645 "fill done handling freed in flight"); 2646 } else { 2647 DTRACE_SET_STATE(db, "fill done"); 2648 } 2649 cv_broadcast(&db->db_changed); 2650 } 2651 mutex_exit(&db->db_mtx); 2652 } 2653 2654 void 2655 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 2656 bp_embedded_type_t etype, enum zio_compress comp, 2657 int uncompressed_size, int compressed_size, int byteorder, 2658 dmu_tx_t *tx) 2659 { 2660 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2661 struct dirty_leaf *dl; 2662 dmu_object_type_t type; 2663 dbuf_dirty_record_t *dr; 2664 2665 if (etype == BP_EMBEDDED_TYPE_DATA) { 2666 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2667 SPA_FEATURE_EMBEDDED_DATA)); 2668 } 2669 2670 DB_DNODE_ENTER(db); 2671 type = DB_DNODE(db)->dn_type; 2672 DB_DNODE_EXIT(db); 2673 2674 ASSERT0(db->db_level); 2675 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2676 2677 dmu_buf_will_not_fill(dbuf, tx); 2678 2679 dr = list_head(&db->db_dirty_records); 2680 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2681 dl = &dr->dt.dl; 2682 encode_embedded_bp_compressed(&dl->dr_overridden_by, 2683 data, comp, uncompressed_size, compressed_size); 2684 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 2685 BP_SET_TYPE(&dl->dr_overridden_by, type); 2686 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 2687 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 2688 2689 dl->dr_override_state = DR_OVERRIDDEN; 2690 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2691 } 2692 2693 void 2694 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx) 2695 { 2696 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2697 dmu_object_type_t type; 2698 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset, 2699 SPA_FEATURE_REDACTED_DATASETS)); 2700 2701 DB_DNODE_ENTER(db); 2702 type = DB_DNODE(db)->dn_type; 2703 DB_DNODE_EXIT(db); 2704 2705 ASSERT0(db->db_level); 2706 dmu_buf_will_not_fill(dbuf, tx); 2707 2708 blkptr_t bp = { { { {0} } } }; 2709 BP_SET_TYPE(&bp, type); 2710 
BP_SET_LEVEL(&bp, 0); 2711 BP_SET_BIRTH(&bp, tx->tx_txg, 0); 2712 BP_SET_REDACTED(&bp); 2713 BPE_SET_LSIZE(&bp, dbuf->db_size); 2714 2715 dbuf_override_impl(db, &bp, tx); 2716 } 2717 2718 /* 2719 * Directly assign a provided arc buf to a given dbuf if it's not referenced 2720 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 2721 */ 2722 void 2723 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 2724 { 2725 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2726 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2727 ASSERT(db->db_level == 0); 2728 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 2729 ASSERT(buf != NULL); 2730 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 2731 ASSERT(tx->tx_txg != 0); 2732 2733 arc_return_buf(buf, db); 2734 ASSERT(arc_released(buf)); 2735 2736 mutex_enter(&db->db_mtx); 2737 2738 while (db->db_state == DB_READ || db->db_state == DB_FILL) 2739 cv_wait(&db->db_changed, &db->db_mtx); 2740 2741 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 2742 2743 if (db->db_state == DB_CACHED && 2744 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2745 /* 2746 * In practice, we will never have a case where we have an 2747 * encrypted arc buffer while additional holds exist on the 2748 * dbuf. We don't handle this here so we simply assert that 2749 * fact instead. 2750 */ 2751 ASSERT(!arc_is_encrypted(buf)); 2752 mutex_exit(&db->db_mtx); 2753 (void) dbuf_dirty(db, tx); 2754 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 2755 arc_buf_destroy(buf, db); 2756 return; 2757 } 2758 2759 if (db->db_state == DB_CACHED) { 2760 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); 2761 2762 ASSERT(db->db_buf != NULL); 2763 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 2764 ASSERT(dr->dt.dl.dr_data == db->db_buf); 2765 2766 if (!arc_released(db->db_buf)) { 2767 ASSERT(dr->dt.dl.dr_override_state == 2768 DR_OVERRIDDEN); 2769 arc_release(db->db_buf, db); 2770 } 2771 dr->dt.dl.dr_data = buf; 2772 arc_buf_destroy(db->db_buf, db); 2773 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2774 arc_release(db->db_buf, db); 2775 arc_buf_destroy(db->db_buf, db); 2776 } 2777 db->db_buf = NULL; 2778 } 2779 ASSERT(db->db_buf == NULL); 2780 dbuf_set_data(db, buf); 2781 db->db_state = DB_FILL; 2782 DTRACE_SET_STATE(db, "filling assigned arcbuf"); 2783 mutex_exit(&db->db_mtx); 2784 (void) dbuf_dirty(db, tx); 2785 dmu_buf_fill_done(&db->db, tx); 2786 } 2787 2788 void 2789 dbuf_destroy(dmu_buf_impl_t *db) 2790 { 2791 dnode_t *dn; 2792 dmu_buf_impl_t *parent = db->db_parent; 2793 dmu_buf_impl_t *dndb; 2794 2795 ASSERT(MUTEX_HELD(&db->db_mtx)); 2796 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2797 2798 if (db->db_buf != NULL) { 2799 arc_buf_destroy(db->db_buf, db); 2800 db->db_buf = NULL; 2801 } 2802 2803 if (db->db_blkid == DMU_BONUS_BLKID) { 2804 int slots = DB_DNODE(db)->dn_num_slots; 2805 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 2806 if (db->db.db_data != NULL) { 2807 kmem_free(db->db.db_data, bonuslen); 2808 arc_space_return(bonuslen, ARC_SPACE_BONUS); 2809 db->db_state = DB_UNCACHED; 2810 DTRACE_SET_STATE(db, "buffer cleared"); 2811 } 2812 } 2813 2814 dbuf_clear_data(db); 2815 2816 if (multilist_link_active(&db->db_cache_link)) { 2817 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2818 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2819 2820 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 2821 (void) zfs_refcount_remove_many( 2822 &dbuf_caches[db->db_caching_status].size, 2823 
db->db.db_size, db); 2824 2825 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 2826 DBUF_STAT_BUMPDOWN(metadata_cache_count); 2827 } else { 2828 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 2829 DBUF_STAT_BUMPDOWN(cache_count); 2830 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 2831 db->db.db_size); 2832 } 2833 db->db_caching_status = DB_NO_CACHE; 2834 } 2835 2836 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2837 ASSERT(db->db_data_pending == NULL); 2838 ASSERT(list_is_empty(&db->db_dirty_records)); 2839 2840 db->db_state = DB_EVICTING; 2841 DTRACE_SET_STATE(db, "buffer eviction started"); 2842 db->db_blkptr = NULL; 2843 2844 /* 2845 * Now that db_state is DB_EVICTING, nobody else can find this via 2846 * the hash table. We can now drop db_mtx, which allows us to 2847 * acquire the dn_dbufs_mtx. 2848 */ 2849 mutex_exit(&db->db_mtx); 2850 2851 DB_DNODE_ENTER(db); 2852 dn = DB_DNODE(db); 2853 dndb = dn->dn_dbuf; 2854 if (db->db_blkid != DMU_BONUS_BLKID) { 2855 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2856 if (needlock) 2857 mutex_enter_nested(&dn->dn_dbufs_mtx, 2858 NESTED_SINGLE); 2859 avl_remove(&dn->dn_dbufs, db); 2860 membar_producer(); 2861 DB_DNODE_EXIT(db); 2862 if (needlock) 2863 mutex_exit(&dn->dn_dbufs_mtx); 2864 /* 2865 * Decrementing the dbuf count means that the hold corresponding 2866 * to the removed dbuf is no longer discounted in dnode_move(), 2867 * so the dnode cannot be moved until after we release the hold. 2868 * The membar_producer() ensures visibility of the decremented 2869 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2870 * release any lock. 2871 */ 2872 mutex_enter(&dn->dn_mtx); 2873 dnode_rele_and_unlock(dn, db, B_TRUE); 2874 db->db_dnode_handle = NULL; 2875 2876 dbuf_hash_remove(db); 2877 } else { 2878 DB_DNODE_EXIT(db); 2879 } 2880 2881 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2882 2883 db->db_parent = NULL; 2884 2885 ASSERT(db->db_buf == NULL); 2886 ASSERT(db->db.db_data == NULL); 2887 ASSERT(db->db_hash_next == NULL); 2888 ASSERT(db->db_blkptr == NULL); 2889 ASSERT(db->db_data_pending == NULL); 2890 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2891 ASSERT(!multilist_link_active(&db->db_cache_link)); 2892 2893 kmem_cache_free(dbuf_kmem_cache, db); 2894 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 2895 2896 /* 2897 * If this dbuf is referenced from an indirect dbuf, 2898 * decrement the ref count on the indirect dbuf. 2899 */ 2900 if (parent && parent != dndb) { 2901 mutex_enter(&parent->db_mtx); 2902 dbuf_rele_and_unlock(parent, db, B_TRUE); 2903 } 2904 } 2905 2906 /* 2907 * Note: While bpp will always be updated if the function returns success, 2908 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2909 * this happens when the dnode is the meta-dnode, or {user|group|project}used 2910 * object. 
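 * Callers must therefore tolerate a NULL *parentp on success, as
 * dbuf_dnode_findbp() and dbuf_hold_impl() do before releasing it.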
2911 */ 2912 __attribute__((always_inline)) 2913 static inline int 2914 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2915 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2916 { 2917 *parentp = NULL; 2918 *bpp = NULL; 2919 2920 ASSERT(blkid != DMU_BONUS_BLKID); 2921 2922 if (blkid == DMU_SPILL_BLKID) { 2923 mutex_enter(&dn->dn_mtx); 2924 if (dn->dn_have_spill && 2925 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2926 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 2927 else 2928 *bpp = NULL; 2929 dbuf_add_ref(dn->dn_dbuf, NULL); 2930 *parentp = dn->dn_dbuf; 2931 mutex_exit(&dn->dn_mtx); 2932 return (0); 2933 } 2934 2935 int nlevels = 2936 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 2937 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2938 2939 ASSERT3U(level * epbs, <, 64); 2940 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2941 /* 2942 * This assertion shouldn't trip as long as the max indirect block size 2943 * is less than 1M. The reason for this is that up to that point, 2944 * the number of levels required to address an entire object with blocks 2945 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 2946 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 2947 * (i.e. we can address the entire object), objects will all use at most 2948 * N-1 levels and the assertion won't overflow. However, once epbs is 2949 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 2950 * enough to address an entire object, so objects will have 5 levels, 2951 * but then this assertion will overflow. 2952 * 2953 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 2954 * need to redo this logic to handle overflows. 2955 */ 2956 ASSERT(level >= nlevels || 2957 ((nlevels - level - 1) * epbs) + 2958 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2959 if (level >= nlevels || 2960 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 2961 ((nlevels - level - 1) * epbs)) || 2962 (fail_sparse && 2963 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2964 /* the buffer has no parent yet */ 2965 return (SET_ERROR(ENOENT)); 2966 } else if (level < nlevels-1) { 2967 /* this block is referenced from an indirect block */ 2968 int err; 2969 2970 err = dbuf_hold_impl(dn, level + 1, 2971 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2972 2973 if (err) 2974 return (err); 2975 err = dbuf_read(*parentp, NULL, 2976 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2977 if (err) { 2978 dbuf_rele(*parentp, NULL); 2979 *parentp = NULL; 2980 return (err); 2981 } 2982 rw_enter(&(*parentp)->db_rwlock, RW_READER); 2983 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2984 (blkid & ((1ULL << epbs) - 1)); 2985 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 2986 ASSERT(BP_IS_HOLE(*bpp)); 2987 rw_exit(&(*parentp)->db_rwlock); 2988 return (0); 2989 } else { 2990 /* the block is referenced from the dnode */ 2991 ASSERT3U(level, ==, nlevels-1); 2992 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2993 blkid < dn->dn_phys->dn_nblkptr); 2994 if (dn->dn_dbuf) { 2995 dbuf_add_ref(dn->dn_dbuf, NULL); 2996 *parentp = dn->dn_dbuf; 2997 } 2998 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2999 return (0); 3000 } 3001 } 3002 3003 static dmu_buf_impl_t * 3004 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 3005 dmu_buf_impl_t *parent, blkptr_t *blkptr) 3006 { 3007 objset_t *os = dn->dn_objset; 3008 dmu_buf_impl_t *db, *odb; 3009 3010 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3011 ASSERT(dn->dn_type != DMU_OT_NONE); 3012 3013 db = 
kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 3014 3015 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t), 3016 offsetof(dbuf_dirty_record_t, dr_dbuf_node)); 3017 3018 db->db_objset = os; 3019 db->db.db_object = dn->dn_object; 3020 db->db_level = level; 3021 db->db_blkid = blkid; 3022 db->db_dirtycnt = 0; 3023 db->db_dnode_handle = dn->dn_handle; 3024 db->db_parent = parent; 3025 db->db_blkptr = blkptr; 3026 3027 db->db_user = NULL; 3028 db->db_user_immediate_evict = FALSE; 3029 db->db_freed_in_flight = FALSE; 3030 db->db_pending_evict = FALSE; 3031 3032 if (blkid == DMU_BONUS_BLKID) { 3033 ASSERT3P(parent, ==, dn->dn_dbuf); 3034 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 3035 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 3036 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 3037 db->db.db_offset = DMU_BONUS_BLKID; 3038 db->db_state = DB_UNCACHED; 3039 DTRACE_SET_STATE(db, "bonus buffer created"); 3040 db->db_caching_status = DB_NO_CACHE; 3041 /* the bonus dbuf is not placed in the hash table */ 3042 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3043 return (db); 3044 } else if (blkid == DMU_SPILL_BLKID) { 3045 db->db.db_size = (blkptr != NULL) ? 3046 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 3047 db->db.db_offset = 0; 3048 } else { 3049 int blocksize = 3050 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 3051 db->db.db_size = blocksize; 3052 db->db.db_offset = db->db_blkid * blocksize; 3053 } 3054 3055 /* 3056 * Hold the dn_dbufs_mtx while we get the new dbuf 3057 * in the hash table *and* added to the dbufs list. 3058 * This prevents a possible deadlock with someone 3059 * trying to look up this dbuf before it's added to the 3060 * dn_dbufs list. 3061 */ 3062 mutex_enter(&dn->dn_dbufs_mtx); 3063 db->db_state = DB_EVICTING; /* not worth logging this state change */ 3064 if ((odb = dbuf_hash_insert(db)) != NULL) { 3065 /* someone else inserted it first */ 3066 kmem_cache_free(dbuf_kmem_cache, db); 3067 mutex_exit(&dn->dn_dbufs_mtx); 3068 DBUF_STAT_BUMP(hash_insert_race); 3069 return (odb); 3070 } 3071 avl_add(&dn->dn_dbufs, db); 3072 3073 db->db_state = DB_UNCACHED; 3074 DTRACE_SET_STATE(db, "regular buffer created"); 3075 db->db_caching_status = DB_NO_CACHE; 3076 mutex_exit(&dn->dn_dbufs_mtx); 3077 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3078 3079 if (parent && parent != dn->dn_dbuf) 3080 dbuf_add_ref(parent, db); 3081 3082 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 3083 zfs_refcount_count(&dn->dn_holds) > 0); 3084 (void) zfs_refcount_add(&dn->dn_holds, db); 3085 3086 dprintf_dbuf(db, "db=%p\n", db); 3087 3088 return (db); 3089 } 3090 3091 /* 3092 * This function returns a block pointer and information about the object, 3093 * given a dnode and a block. This is a publicly accessible version of 3094 * dbuf_findbp that only returns some information, rather than the 3095 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock 3096 * should be locked as (at least) a reader. 
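 *
 * A minimal calling sketch (illustrative only; assumes the caller already
 * has a hold on the dnode and only wants the block pointer):
 *
 *	blkptr_t bp;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_dnode_findbp(dn, 0, blkid, &bp, NULL, NULL);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * The datablkszsec and indblkshift out-parameters may be NULL if the
 * caller does not need them.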
3097 */ 3098 int 3099 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid, 3100 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift) 3101 { 3102 dmu_buf_impl_t *dbp = NULL; 3103 blkptr_t *bp2; 3104 int err = 0; 3105 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3106 3107 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2); 3108 if (err == 0) { 3109 *bp = *bp2; 3110 if (dbp != NULL) 3111 dbuf_rele(dbp, NULL); 3112 if (datablkszsec != NULL) 3113 *datablkszsec = dn->dn_phys->dn_datablkszsec; 3114 if (indblkshift != NULL) 3115 *indblkshift = dn->dn_phys->dn_indblkshift; 3116 } 3117 3118 return (err); 3119 } 3120 3121 typedef struct dbuf_prefetch_arg { 3122 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 3123 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 3124 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 3125 int dpa_curlevel; /* The current level that we're reading */ 3126 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 3127 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 3128 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 3129 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 3130 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */ 3131 void *dpa_arg; /* prefetch completion arg */ 3132 } dbuf_prefetch_arg_t; 3133 3134 static void 3135 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done) 3136 { 3137 if (dpa->dpa_cb != NULL) 3138 dpa->dpa_cb(dpa->dpa_arg, io_done); 3139 kmem_free(dpa, sizeof (*dpa)); 3140 } 3141 3142 static void 3143 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, 3144 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3145 { 3146 dbuf_prefetch_arg_t *dpa = private; 3147 3148 dbuf_prefetch_fini(dpa, B_TRUE); 3149 if (abuf != NULL) 3150 arc_buf_destroy(abuf, private); 3151 } 3152 3153 /* 3154 * Actually issue the prefetch read for the block given. 3155 */ 3156 static void 3157 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 3158 { 3159 ASSERT(!BP_IS_REDACTED(bp) || 3160 dsl_dataset_feature_is_active( 3161 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3162 SPA_FEATURE_REDACTED_DATASETS)); 3163 3164 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 3165 return (dbuf_prefetch_fini(dpa, B_FALSE)); 3166 3167 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 3168 arc_flags_t aflags = 3169 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | 3170 ARC_FLAG_NO_BUF; 3171 3172 /* dnodes are always read as raw and then converted later */ 3173 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 3174 dpa->dpa_curlevel == 0) 3175 zio_flags |= ZIO_FLAG_RAW; 3176 3177 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3178 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 3179 ASSERT(dpa->dpa_zio != NULL); 3180 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, 3181 dbuf_issue_final_prefetch_done, dpa, 3182 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 3183 } 3184 3185 /* 3186 * Called when an indirect block above our prefetch target is read in. This 3187 * will either read in the next indirect block down the tree or issue the actual 3188 * prefetch if the next block down is our target. 
3189 */ 3190 static void 3191 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 3192 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3193 { 3194 dbuf_prefetch_arg_t *dpa = private; 3195 3196 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 3197 ASSERT3S(dpa->dpa_curlevel, >, 0); 3198 3199 if (abuf == NULL) { 3200 ASSERT(zio == NULL || zio->io_error != 0); 3201 return (dbuf_prefetch_fini(dpa, B_TRUE)); 3202 } 3203 ASSERT(zio == NULL || zio->io_error == 0); 3204 3205 /* 3206 * The dpa_dnode is only valid if we are called with a NULL 3207 * zio. This indicates that the arc_read() returned without 3208 * first calling zio_read() to issue a physical read. Once 3209 * a physical read is made the dpa_dnode must be invalidated 3210 * as the locks guarding it may have been dropped. If the 3211 * dpa_dnode is still valid, then we want to add it to the dbuf 3212 * cache. To do so, we must hold the dbuf associated with the block 3213 * we just prefetched, read its contents so that we associate it 3214 * with an arc_buf_t, and then release it. 3215 */ 3216 if (zio != NULL) { 3217 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 3218 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 3219 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 3220 } else { 3221 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 3222 } 3223 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 3224 3225 dpa->dpa_dnode = NULL; 3226 } else if (dpa->dpa_dnode != NULL) { 3227 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 3228 (dpa->dpa_epbs * (dpa->dpa_curlevel - 3229 dpa->dpa_zb.zb_level)); 3230 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 3231 dpa->dpa_curlevel, curblkid, FTAG); 3232 if (db == NULL) { 3233 arc_buf_destroy(abuf, private); 3234 return (dbuf_prefetch_fini(dpa, B_TRUE)); 3235 } 3236 (void) dbuf_read(db, NULL, 3237 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 3238 dbuf_rele(db, FTAG); 3239 } 3240 3241 dpa->dpa_curlevel--; 3242 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 3243 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 3244 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 3245 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 3246 3247 ASSERT(!BP_IS_REDACTED(bp) || 3248 dsl_dataset_feature_is_active( 3249 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3250 SPA_FEATURE_REDACTED_DATASETS)); 3251 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) { 3252 dbuf_prefetch_fini(dpa, B_TRUE); 3253 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 3254 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 3255 dbuf_issue_final_prefetch(dpa, bp); 3256 } else { 3257 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3258 zbookmark_phys_t zb; 3259 3260 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3261 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 3262 iter_aflags |= ARC_FLAG_L2CACHE; 3263 3264 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3265 3266 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 3267 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 3268 3269 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3270 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 3271 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3272 &iter_aflags, &zb); 3273 } 3274 3275 arc_buf_destroy(abuf, private); 3276 } 3277 3278 /* 3279 * Issue prefetch reads for the given block on the given level. If the indirect 3280 * blocks above that block are not in memory, we will read them in 3281 * asynchronously. As a result, this call never blocks waiting for a read to 3282 * complete. 
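 * Returns 1 if a prefetch was issued, in which case the completion
 * callback (if any) fires later via dbuf_prefetch_fini(); returns 0
 * otherwise, after invoking the callback (if any) with io_done = B_FALSE.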
Note that the prefetch might fail if the dataset is encrypted and 3283 * the encryption key is unmapped before the IO completes. 3284 */ 3285 int 3286 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid, 3287 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb, 3288 void *arg) 3289 { 3290 blkptr_t bp; 3291 int epbs, nlevels, curlevel; 3292 uint64_t curblkid; 3293 3294 ASSERT(blkid != DMU_BONUS_BLKID); 3295 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3296 3297 if (blkid > dn->dn_maxblkid) 3298 goto no_issue; 3299 3300 if (level == 0 && dnode_block_freed(dn, blkid)) 3301 goto no_issue; 3302 3303 /* 3304 * This dnode hasn't been written to disk yet, so there's nothing to 3305 * prefetch. 3306 */ 3307 nlevels = dn->dn_phys->dn_nlevels; 3308 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 3309 goto no_issue; 3310 3311 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3312 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 3313 goto no_issue; 3314 3315 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 3316 level, blkid); 3317 if (db != NULL) { 3318 mutex_exit(&db->db_mtx); 3319 /* 3320 * This dbuf already exists. It is either CACHED, or 3321 * (we assume) about to be read or filled. 3322 */ 3323 goto no_issue; 3324 } 3325 3326 /* 3327 * Find the closest ancestor (indirect block) of the target block 3328 * that is present in the cache. In this indirect block, we will 3329 * find the bp that is at curlevel, curblkid. 3330 */ 3331 curlevel = level; 3332 curblkid = blkid; 3333 while (curlevel < nlevels - 1) { 3334 int parent_level = curlevel + 1; 3335 uint64_t parent_blkid = curblkid >> epbs; 3336 dmu_buf_impl_t *db; 3337 3338 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 3339 FALSE, TRUE, FTAG, &db) == 0) { 3340 blkptr_t *bpp = db->db_buf->b_data; 3341 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 3342 dbuf_rele(db, FTAG); 3343 break; 3344 } 3345 3346 curlevel = parent_level; 3347 curblkid = parent_blkid; 3348 } 3349 3350 if (curlevel == nlevels - 1) { 3351 /* No cached indirect blocks found. */ 3352 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 3353 bp = dn->dn_phys->dn_blkptr[curblkid]; 3354 } 3355 ASSERT(!BP_IS_REDACTED(&bp) || 3356 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset, 3357 SPA_FEATURE_REDACTED_DATASETS)); 3358 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp)) 3359 goto no_issue; 3360 3361 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 3362 3363 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 3364 ZIO_FLAG_CANFAIL); 3365 3366 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 3367 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 3368 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 3369 dn->dn_object, level, blkid); 3370 dpa->dpa_curlevel = curlevel; 3371 dpa->dpa_prio = prio; 3372 dpa->dpa_aflags = aflags; 3373 dpa->dpa_spa = dn->dn_objset->os_spa; 3374 dpa->dpa_dnode = dn; 3375 dpa->dpa_epbs = epbs; 3376 dpa->dpa_zio = pio; 3377 dpa->dpa_cb = cb; 3378 dpa->dpa_arg = arg; 3379 3380 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3381 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 3382 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 3383 3384 /* 3385 * If we have the indirect just above us, no need to do the asynchronous 3386 * prefetch chain; we'll just run the last step ourselves. If we're at 3387 * a higher level, though, we want to issue the prefetches for all the 3388 * indirect blocks asynchronously, so we can go on with whatever we were 3389 * doing. 
3390 */ 3391 if (curlevel == level) { 3392 ASSERT3U(curblkid, ==, blkid); 3393 dbuf_issue_final_prefetch(dpa, &bp); 3394 } else { 3395 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3396 zbookmark_phys_t zb; 3397 3398 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3399 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 3400 iter_aflags |= ARC_FLAG_L2CACHE; 3401 3402 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 3403 dn->dn_object, curlevel, curblkid); 3404 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3405 &bp, dbuf_prefetch_indirect_done, dpa, prio, 3406 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3407 &iter_aflags, &zb); 3408 } 3409 /* 3410 * We use pio here instead of dpa_zio since it's possible that 3411 * dpa may have already been freed. 3412 */ 3413 zio_nowait(pio); 3414 return (1); 3415 no_issue: 3416 if (cb != NULL) 3417 cb(arg, B_FALSE); 3418 return (0); 3419 } 3420 3421 int 3422 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 3423 arc_flags_t aflags) 3424 { 3425 3426 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL)); 3427 } 3428 3429 /* 3430 * Helper function for dbuf_hold_impl() to copy a buffer. Handles 3431 * the case of encrypted, compressed and uncompressed buffers by 3432 * allocating the new buffer, respectively, with arc_alloc_raw_buf(), 3433 * arc_alloc_compressed_buf() or arc_alloc_buf().* 3434 * 3435 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl(). 3436 */ 3437 noinline static void 3438 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db) 3439 { 3440 dbuf_dirty_record_t *dr = db->db_data_pending; 3441 arc_buf_t *newdata, *data = dr->dt.dl.dr_data; 3442 3443 newdata = dbuf_alloc_arcbuf_from_arcbuf(db, data); 3444 dbuf_set_data(db, newdata); 3445 rw_enter(&db->db_rwlock, RW_WRITER); 3446 bcopy(data->b_data, db->db.db_data, arc_buf_size(data)); 3447 rw_exit(&db->db_rwlock); 3448 } 3449 3450 /* 3451 * Returns with db_holds incremented, and db_mtx not held. 3452 * Note: dn_struct_rwlock must be held. 
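 * The convenience wrappers dbuf_hold() and dbuf_hold_level() below pass
 * fail_sparse = fail_uncached = FALSE and convert an error return into a
 * NULL dbuf.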
3453 */ 3454 int 3455 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 3456 boolean_t fail_sparse, boolean_t fail_uncached, 3457 void *tag, dmu_buf_impl_t **dbp) 3458 { 3459 dmu_buf_impl_t *db, *parent = NULL; 3460 3461 /* If the pool has been created, verify the tx_sync_lock is not held */ 3462 spa_t *spa = dn->dn_objset->os_spa; 3463 dsl_pool_t *dp = spa->spa_dsl_pool; 3464 if (dp != NULL) { 3465 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock)); 3466 } 3467 3468 ASSERT(blkid != DMU_BONUS_BLKID); 3469 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3470 ASSERT3U(dn->dn_nlevels, >, level); 3471 3472 *dbp = NULL; 3473 3474 /* dbuf_find() returns with db_mtx held */ 3475 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 3476 3477 if (db == NULL) { 3478 blkptr_t *bp = NULL; 3479 int err; 3480 3481 if (fail_uncached) 3482 return (SET_ERROR(ENOENT)); 3483 3484 ASSERT3P(parent, ==, NULL); 3485 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 3486 if (fail_sparse) { 3487 if (err == 0 && bp && BP_IS_HOLE(bp)) 3488 err = SET_ERROR(ENOENT); 3489 if (err) { 3490 if (parent) 3491 dbuf_rele(parent, NULL); 3492 return (err); 3493 } 3494 } 3495 if (err && err != ENOENT) 3496 return (err); 3497 db = dbuf_create(dn, level, blkid, parent, bp); 3498 } 3499 3500 if (fail_uncached && db->db_state != DB_CACHED) { 3501 mutex_exit(&db->db_mtx); 3502 return (SET_ERROR(ENOENT)); 3503 } 3504 3505 if (db->db_buf != NULL) { 3506 arc_buf_access(db->db_buf); 3507 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 3508 } 3509 3510 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 3511 3512 /* 3513 * If this buffer is currently syncing out, and we are 3514 * still referencing it from db_data, we need to make a copy 3515 * of it in case we decide we want to dirty it again in this txg. 
3516 */ 3517 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 3518 dn->dn_object != DMU_META_DNODE_OBJECT && 3519 db->db_state == DB_CACHED && db->db_data_pending) { 3520 dbuf_dirty_record_t *dr = db->db_data_pending; 3521 if (dr->dt.dl.dr_data == db->db_buf) 3522 dbuf_hold_copy(dn, db); 3523 } 3524 3525 if (multilist_link_active(&db->db_cache_link)) { 3526 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3527 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 3528 db->db_caching_status == DB_DBUF_METADATA_CACHE); 3529 3530 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 3531 (void) zfs_refcount_remove_many( 3532 &dbuf_caches[db->db_caching_status].size, 3533 db->db.db_size, db); 3534 3535 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 3536 DBUF_STAT_BUMPDOWN(metadata_cache_count); 3537 } else { 3538 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 3539 DBUF_STAT_BUMPDOWN(cache_count); 3540 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 3541 db->db.db_size); 3542 } 3543 db->db_caching_status = DB_NO_CACHE; 3544 } 3545 (void) zfs_refcount_add(&db->db_holds, tag); 3546 DBUF_VERIFY(db); 3547 mutex_exit(&db->db_mtx); 3548 3549 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 3550 if (parent) 3551 dbuf_rele(parent, NULL); 3552 3553 ASSERT3P(DB_DNODE(db), ==, dn); 3554 ASSERT3U(db->db_blkid, ==, blkid); 3555 ASSERT3U(db->db_level, ==, level); 3556 *dbp = db; 3557 3558 return (0); 3559 } 3560 3561 dmu_buf_impl_t * 3562 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 3563 { 3564 return (dbuf_hold_level(dn, 0, blkid, tag)); 3565 } 3566 3567 dmu_buf_impl_t * 3568 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 3569 { 3570 dmu_buf_impl_t *db; 3571 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 3572 return (err ? 
NULL : db); 3573 } 3574 3575 void 3576 dbuf_create_bonus(dnode_t *dn) 3577 { 3578 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 3579 3580 ASSERT(dn->dn_bonus == NULL); 3581 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 3582 } 3583 3584 int 3585 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 3586 { 3587 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3588 3589 if (db->db_blkid != DMU_SPILL_BLKID) 3590 return (SET_ERROR(ENOTSUP)); 3591 if (blksz == 0) 3592 blksz = SPA_MINBLOCKSIZE; 3593 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 3594 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 3595 3596 dbuf_new_size(db, blksz, tx); 3597 3598 return (0); 3599 } 3600 3601 void 3602 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 3603 { 3604 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 3605 } 3606 3607 #pragma weak dmu_buf_add_ref = dbuf_add_ref 3608 void 3609 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 3610 { 3611 int64_t holds = zfs_refcount_add(&db->db_holds, tag); 3612 VERIFY3S(holds, >, 1); 3613 } 3614 3615 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 3616 boolean_t 3617 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 3618 void *tag) 3619 { 3620 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3621 dmu_buf_impl_t *found_db; 3622 boolean_t result = B_FALSE; 3623 3624 if (blkid == DMU_BONUS_BLKID) 3625 found_db = dbuf_find_bonus(os, obj); 3626 else 3627 found_db = dbuf_find(os, obj, 0, blkid); 3628 3629 if (found_db != NULL) { 3630 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 3631 (void) zfs_refcount_add(&db->db_holds, tag); 3632 result = B_TRUE; 3633 } 3634 mutex_exit(&found_db->db_mtx); 3635 } 3636 return (result); 3637 } 3638 3639 /* 3640 * If you call dbuf_rele() you had better not be referencing the dnode handle 3641 * unless you have some other direct or indirect hold on the dnode. (An indirect 3642 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 3643 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 3644 * dnode's parent dbuf evicting its dnode handles. 3645 */ 3646 void 3647 dbuf_rele(dmu_buf_impl_t *db, void *tag) 3648 { 3649 mutex_enter(&db->db_mtx); 3650 dbuf_rele_and_unlock(db, tag, B_FALSE); 3651 } 3652 3653 void 3654 dmu_buf_rele(dmu_buf_t *db, void *tag) 3655 { 3656 dbuf_rele((dmu_buf_impl_t *)db, tag); 3657 } 3658 3659 /* 3660 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 3661 * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 3662 * argument should be set if we are already in the dbuf-evicting code 3663 * path, in which case we don't want to recursively evict. This allows us to 3664 * avoid deeply nested stacks that would have a call flow similar to this: 3665 * 3666 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 3667 * ^ | 3668 * | | 3669 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 3670 * 3671 */ 3672 void 3673 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting) 3674 { 3675 int64_t holds; 3676 uint64_t size; 3677 3678 ASSERT(MUTEX_HELD(&db->db_mtx)); 3679 DBUF_VERIFY(db); 3680 3681 /* 3682 * Remove the reference to the dbuf before removing its hold on the 3683 * dnode so we can guarantee in dnode_move() that a referenced bonus 3684 * buffer has a corresponding dnode hold. 
3685 */ 3686 holds = zfs_refcount_remove(&db->db_holds, tag); 3687 ASSERT(holds >= 0); 3688 3689 /* 3690 * We can't freeze indirects if there is a possibility that they 3691 * may be modified in the current syncing context. 3692 */ 3693 if (db->db_buf != NULL && 3694 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 3695 arc_buf_freeze(db->db_buf); 3696 } 3697 3698 if (holds == db->db_dirtycnt && 3699 db->db_level == 0 && db->db_user_immediate_evict) 3700 dbuf_evict_user(db); 3701 3702 if (holds == 0) { 3703 if (db->db_blkid == DMU_BONUS_BLKID) { 3704 dnode_t *dn; 3705 boolean_t evict_dbuf = db->db_pending_evict; 3706 3707 /* 3708 * If the dnode moves here, we cannot cross this 3709 * barrier until the move completes. 3710 */ 3711 DB_DNODE_ENTER(db); 3712 3713 dn = DB_DNODE(db); 3714 atomic_dec_32(&dn->dn_dbufs_count); 3715 3716 /* 3717 * Decrementing the dbuf count means that the bonus 3718 * buffer's dnode hold is no longer discounted in 3719 * dnode_move(). The dnode cannot move until after 3720 * the dnode_rele() below. 3721 */ 3722 DB_DNODE_EXIT(db); 3723 3724 /* 3725 * Do not reference db after its lock is dropped. 3726 * Another thread may evict it. 3727 */ 3728 mutex_exit(&db->db_mtx); 3729 3730 if (evict_dbuf) 3731 dnode_evict_bonus(dn); 3732 3733 dnode_rele(dn, db); 3734 } else if (db->db_buf == NULL) { 3735 /* 3736 * This is a special case: we never associated this 3737 * dbuf with any data allocated from the ARC. 3738 */ 3739 ASSERT(db->db_state == DB_UNCACHED || 3740 db->db_state == DB_NOFILL); 3741 dbuf_destroy(db); 3742 } else if (arc_released(db->db_buf)) { 3743 /* 3744 * This dbuf has anonymous data associated with it. 3745 */ 3746 dbuf_destroy(db); 3747 } else { 3748 boolean_t do_arc_evict = B_FALSE; 3749 blkptr_t bp; 3750 spa_t *spa = dmu_objset_spa(db->db_objset); 3751 3752 if (!DBUF_IS_CACHEABLE(db) && 3753 db->db_blkptr != NULL && 3754 !BP_IS_HOLE(db->db_blkptr) && 3755 !BP_IS_EMBEDDED(db->db_blkptr)) { 3756 do_arc_evict = B_TRUE; 3757 bp = *db->db_blkptr; 3758 } 3759 3760 if (!DBUF_IS_CACHEABLE(db) || 3761 db->db_pending_evict) { 3762 dbuf_destroy(db); 3763 } else if (!multilist_link_active(&db->db_cache_link)) { 3764 ASSERT3U(db->db_caching_status, ==, 3765 DB_NO_CACHE); 3766 3767 dbuf_cached_state_t dcs = 3768 dbuf_include_in_metadata_cache(db) ? 
3769 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 3770 db->db_caching_status = dcs; 3771 3772 multilist_insert(&dbuf_caches[dcs].cache, db); 3773 uint64_t db_size = db->db.db_size; 3774 size = zfs_refcount_add_many( 3775 &dbuf_caches[dcs].size, db_size, db); 3776 uint8_t db_level = db->db_level; 3777 mutex_exit(&db->db_mtx); 3778 3779 if (dcs == DB_DBUF_METADATA_CACHE) { 3780 DBUF_STAT_BUMP(metadata_cache_count); 3781 DBUF_STAT_MAX( 3782 metadata_cache_size_bytes_max, 3783 size); 3784 } else { 3785 DBUF_STAT_BUMP(cache_count); 3786 DBUF_STAT_MAX(cache_size_bytes_max, 3787 size); 3788 DBUF_STAT_BUMP(cache_levels[db_level]); 3789 DBUF_STAT_INCR( 3790 cache_levels_bytes[db_level], 3791 db_size); 3792 } 3793 3794 if (dcs == DB_DBUF_CACHE && !evicting) 3795 dbuf_evict_notify(size); 3796 } 3797 3798 if (do_arc_evict) 3799 arc_freed(spa, &bp); 3800 } 3801 } else { 3802 mutex_exit(&db->db_mtx); 3803 } 3804 3805 } 3806 3807 #pragma weak dmu_buf_refcount = dbuf_refcount 3808 uint64_t 3809 dbuf_refcount(dmu_buf_impl_t *db) 3810 { 3811 return (zfs_refcount_count(&db->db_holds)); 3812 } 3813 3814 uint64_t 3815 dmu_buf_user_refcount(dmu_buf_t *db_fake) 3816 { 3817 uint64_t holds; 3818 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3819 3820 mutex_enter(&db->db_mtx); 3821 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); 3822 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; 3823 mutex_exit(&db->db_mtx); 3824 3825 return (holds); 3826 } 3827 3828 void * 3829 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 3830 dmu_buf_user_t *new_user) 3831 { 3832 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3833 3834 mutex_enter(&db->db_mtx); 3835 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3836 if (db->db_user == old_user) 3837 db->db_user = new_user; 3838 else 3839 old_user = db->db_user; 3840 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3841 mutex_exit(&db->db_mtx); 3842 3843 return (old_user); 3844 } 3845 3846 void * 3847 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3848 { 3849 return (dmu_buf_replace_user(db_fake, NULL, user)); 3850 } 3851 3852 void * 3853 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3854 { 3855 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3856 3857 db->db_user_immediate_evict = TRUE; 3858 return (dmu_buf_set_user(db_fake, user)); 3859 } 3860 3861 void * 3862 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3863 { 3864 return (dmu_buf_replace_user(db_fake, user, NULL)); 3865 } 3866 3867 void * 3868 dmu_buf_get_user(dmu_buf_t *db_fake) 3869 { 3870 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3871 3872 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3873 return (db->db_user); 3874 } 3875 3876 void 3877 dmu_buf_user_evict_wait() 3878 { 3879 taskq_wait(dbu_evict_taskq); 3880 } 3881 3882 blkptr_t * 3883 dmu_buf_get_blkptr(dmu_buf_t *db) 3884 { 3885 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3886 return (dbi->db_blkptr); 3887 } 3888 3889 objset_t * 3890 dmu_buf_get_objset(dmu_buf_t *db) 3891 { 3892 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3893 return (dbi->db_objset); 3894 } 3895 3896 dnode_t * 3897 dmu_buf_dnode_enter(dmu_buf_t *db) 3898 { 3899 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3900 DB_DNODE_ENTER(dbi); 3901 return (DB_DNODE(dbi)); 3902 } 3903 3904 void 3905 dmu_buf_dnode_exit(dmu_buf_t *db) 3906 { 3907 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3908 DB_DNODE_EXIT(dbi); 3909 } 3910 3911 static void 3912 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 3913 { 3914 /* ASSERT(dmu_tx_is_syncing(tx) */ 3915 
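/*
 * If db_blkptr has not been wired up yet, point it at the proper
 * location: the dnode's spill pointer, one of the dnode's embedded
 * blkptrs, or a slot in the parent indirect block (hooking up
 * db_parent along the way when needed).
 */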
ASSERT(MUTEX_HELD(&db->db_mtx)); 3916 3917 if (db->db_blkptr != NULL) 3918 return; 3919 3920 if (db->db_blkid == DMU_SPILL_BLKID) { 3921 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 3922 BP_ZERO(db->db_blkptr); 3923 return; 3924 } 3925 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 3926 /* 3927 * This buffer was allocated at a time when there were 3928 * no available blkptrs from the dnode, or it was 3929 * inappropriate to hook it in (i.e., nlevels mismatch). 3930 */ 3931 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 3932 ASSERT(db->db_parent == NULL); 3933 db->db_parent = dn->dn_dbuf; 3934 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 3935 DBUF_VERIFY(db); 3936 } else { 3937 dmu_buf_impl_t *parent = db->db_parent; 3938 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3939 3940 ASSERT(dn->dn_phys->dn_nlevels > 1); 3941 if (parent == NULL) { 3942 mutex_exit(&db->db_mtx); 3943 rw_enter(&dn->dn_struct_rwlock, RW_READER); 3944 parent = dbuf_hold_level(dn, db->db_level + 1, 3945 db->db_blkid >> epbs, db); 3946 rw_exit(&dn->dn_struct_rwlock); 3947 mutex_enter(&db->db_mtx); 3948 db->db_parent = parent; 3949 } 3950 db->db_blkptr = (blkptr_t *)parent->db.db_data + 3951 (db->db_blkid & ((1ULL << epbs) - 1)); 3952 DBUF_VERIFY(db); 3953 } 3954 } 3955 3956 static void 3957 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3958 { 3959 dmu_buf_impl_t *db = dr->dr_dbuf; 3960 void *data = dr->dt.dl.dr_data; 3961 3962 ASSERT0(db->db_level); 3963 ASSERT(MUTEX_HELD(&db->db_mtx)); 3964 ASSERT(db->db_blkid == DMU_BONUS_BLKID); 3965 ASSERT(data != NULL); 3966 3967 dnode_t *dn = dr->dr_dnode; 3968 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 3969 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 3970 bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys)); 3971 3972 dbuf_sync_leaf_verify_bonus_dnode(dr); 3973 3974 dbuf_undirty_bonus(dr); 3975 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 3976 } 3977 3978 /* 3979 * When syncing out a block of dnodes, adjust the block to deal with 3980 * encryption. Normally, we make sure the block is decrypted before writing 3981 * it. If we have crypt params, then we are writing a raw (encrypted) block, 3982 * from a raw receive. In this case, set the ARC buf's crypt params so 3983 * that the BP will be filled with the correct byteorder, salt, iv, and mac. 3984 */ 3985 static void 3986 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr) 3987 { 3988 int err; 3989 dmu_buf_impl_t *db = dr->dr_dbuf; 3990 3991 ASSERT(MUTEX_HELD(&db->db_mtx)); 3992 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 3993 ASSERT3U(db->db_level, ==, 0); 3994 3995 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { 3996 zbookmark_phys_t zb; 3997 3998 /* 3999 * Unfortunately, there is currently no mechanism for 4000 * syncing context to handle decryption errors. An error 4001 * here is only possible if an attacker maliciously 4002 * changed a dnode block and updated the associated 4003 * checksums going up the block tree.
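 * That is why a failure from arc_untransform() below is treated as
 * fatal (a panic) rather than being returned to the caller.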
4004 */ 4005 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 4006 db->db.db_object, db->db_level, db->db_blkid); 4007 err = arc_untransform(db->db_buf, db->db_objset->os_spa, 4008 &zb, B_TRUE); 4009 if (err) 4010 panic("Invalid dnode block MAC"); 4011 } else if (dr->dt.dl.dr_has_raw_params) { 4012 (void) arc_release(dr->dt.dl.dr_data, db); 4013 arc_convert_to_raw(dr->dt.dl.dr_data, 4014 dmu_objset_id(db->db_objset), 4015 dr->dt.dl.dr_byteorder, DMU_OT_DNODE, 4016 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac); 4017 } 4018 } 4019 4020 /* 4021 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it 4022 * is critical that we not allow the compiler to inline this function into 4023 * dbuf_sync_list() thereby drastically bloating the stack usage. 4024 */ 4025 noinline static void 4026 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4027 { 4028 dmu_buf_impl_t *db = dr->dr_dbuf; 4029 dnode_t *dn = dr->dr_dnode; 4030 4031 ASSERT(dmu_tx_is_syncing(tx)); 4032 4033 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 4034 4035 mutex_enter(&db->db_mtx); 4036 4037 ASSERT(db->db_level > 0); 4038 DBUF_VERIFY(db); 4039 4040 /* Read the block if it hasn't been read yet. */ 4041 if (db->db_buf == NULL) { 4042 mutex_exit(&db->db_mtx); 4043 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 4044 mutex_enter(&db->db_mtx); 4045 } 4046 ASSERT3U(db->db_state, ==, DB_CACHED); 4047 ASSERT(db->db_buf != NULL); 4048 4049 /* Indirect block size must match what the dnode thinks it is. */ 4050 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 4051 dbuf_check_blkptr(dn, db); 4052 4053 /* Provide the pending dirty record to child dbufs */ 4054 db->db_data_pending = dr; 4055 4056 mutex_exit(&db->db_mtx); 4057 4058 dbuf_write(dr, db->db_buf, tx); 4059 4060 zio_t *zio = dr->dr_zio; 4061 mutex_enter(&dr->dt.di.dr_mtx); 4062 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 4063 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 4064 mutex_exit(&dr->dt.di.dr_mtx); 4065 zio_nowait(zio); 4066 } 4067 4068 /* 4069 * Verify that the size of the data in our bonus buffer does not exceed 4070 * its recorded size. 4071 * 4072 * The purpose of this verification is to catch any cases in development 4073 * where the size of a phys structure (i.e., space_map_phys_t) grows and, 4074 * due to incorrect feature management, older pools expect to read more 4075 * data even though they didn't actually write it to begin with. 4076 * 4077 * For example, this would catch an error in the feature logic where we 4078 * open an older pool and we expect to write the space map histogram of 4079 * a space map with size SPACE_MAP_SIZE_V0. 4080 */ 4081 static void 4082 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr) 4083 { 4084 #ifdef ZFS_DEBUG 4085 dnode_t *dn = dr->dr_dnode; 4086 4087 /* 4088 * Encrypted bonus buffers can have data past their bonuslen. 4089 * Skip the verification of these blocks.
4090 */ 4091 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)) 4092 return; 4093 4094 uint16_t bonuslen = dn->dn_phys->dn_bonuslen; 4095 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 4096 ASSERT3U(bonuslen, <=, maxbonuslen); 4097 4098 arc_buf_t *datap = dr->dt.dl.dr_data; 4099 char *datap_end = ((char *)datap) + bonuslen; 4100 char *datap_max = ((char *)datap) + maxbonuslen; 4101 4102 /* ensure that everything is zero after our data */ 4103 for (; datap_end < datap_max; datap_end++) 4104 ASSERT(*datap_end == 0); 4105 #endif 4106 } 4107 4108 static blkptr_t * 4109 dbuf_lightweight_bp(dbuf_dirty_record_t *dr) 4110 { 4111 /* This must be a lightweight dirty record. */ 4112 ASSERT3P(dr->dr_dbuf, ==, NULL); 4113 dnode_t *dn = dr->dr_dnode; 4114 4115 if (dn->dn_phys->dn_nlevels == 1) { 4116 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr); 4117 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]); 4118 } else { 4119 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf; 4120 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 4121 VERIFY3U(parent_db->db_level, ==, 1); 4122 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn); 4123 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid); 4124 blkptr_t *bp = parent_db->db.db_data; 4125 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]); 4126 } 4127 } 4128 4129 static void 4130 dbuf_lightweight_ready(zio_t *zio) 4131 { 4132 dbuf_dirty_record_t *dr = zio->io_private; 4133 blkptr_t *bp = zio->io_bp; 4134 4135 if (zio->io_error != 0) 4136 return; 4137 4138 dnode_t *dn = dr->dr_dnode; 4139 4140 blkptr_t *bp_orig = dbuf_lightweight_bp(dr); 4141 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4142 int64_t delta = bp_get_dsize_sync(spa, bp) - 4143 bp_get_dsize_sync(spa, bp_orig); 4144 dnode_diduse_space(dn, delta); 4145 4146 uint64_t blkid = dr->dt.dll.dr_blkid; 4147 mutex_enter(&dn->dn_mtx); 4148 if (blkid > dn->dn_phys->dn_maxblkid) { 4149 ASSERT0(dn->dn_objset->os_raw_receive); 4150 dn->dn_phys->dn_maxblkid = blkid; 4151 } 4152 mutex_exit(&dn->dn_mtx); 4153 4154 if (!BP_IS_EMBEDDED(bp)) { 4155 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1; 4156 BP_SET_FILL(bp, fill); 4157 } 4158 4159 dmu_buf_impl_t *parent_db; 4160 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1); 4161 if (dr->dr_parent == NULL) { 4162 parent_db = dn->dn_dbuf; 4163 } else { 4164 parent_db = dr->dr_parent->dr_dbuf; 4165 } 4166 rw_enter(&parent_db->db_rwlock, RW_WRITER); 4167 *bp_orig = *bp; 4168 rw_exit(&parent_db->db_rwlock); 4169 } 4170 4171 static void 4172 dbuf_lightweight_physdone(zio_t *zio) 4173 { 4174 dbuf_dirty_record_t *dr = zio->io_private; 4175 dsl_pool_t *dp = spa_get_dsl(zio->io_spa); 4176 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 4177 4178 /* 4179 * The callback will be called io_phys_children times. Retire one 4180 * portion of our dirty space each time we are called. Any rounding 4181 * error will be cleaned up by dbuf_lightweight_done(). 
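 *
 * Hypothetical illustration (numbers invented): with dr_accounted of
 * 10 bytes and io_phys_children of 3, each call here retires
 * 10 / 3 = 3 bytes and dbuf_lightweight_done() retires the remaining
 * 10 % 3 = 1 byte.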
4182 */ 4183 int delta = dr->dr_accounted / zio->io_phys_children; 4184 dsl_pool_undirty_space(dp, delta, zio->io_txg); 4185 } 4186 4187 static void 4188 dbuf_lightweight_done(zio_t *zio) 4189 { 4190 dbuf_dirty_record_t *dr = zio->io_private; 4191 4192 VERIFY0(zio->io_error); 4193 4194 objset_t *os = dr->dr_dnode->dn_objset; 4195 dmu_tx_t *tx = os->os_synctx; 4196 4197 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 4198 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); 4199 } else { 4200 dsl_dataset_t *ds = os->os_dsl_dataset; 4201 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE); 4202 dsl_dataset_block_born(ds, zio->io_bp, tx); 4203 } 4204 4205 /* 4206 * See comment in dbuf_write_done(). 4207 */ 4208 if (zio->io_phys_children == 0) { 4209 dsl_pool_undirty_space(dmu_objset_pool(os), 4210 dr->dr_accounted, zio->io_txg); 4211 } else { 4212 dsl_pool_undirty_space(dmu_objset_pool(os), 4213 dr->dr_accounted % zio->io_phys_children, zio->io_txg); 4214 } 4215 4216 abd_free(dr->dt.dll.dr_abd); 4217 kmem_free(dr, sizeof (*dr)); 4218 } 4219 4220 noinline static void 4221 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4222 { 4223 dnode_t *dn = dr->dr_dnode; 4224 zio_t *pio; 4225 if (dn->dn_phys->dn_nlevels == 1) { 4226 pio = dn->dn_zio; 4227 } else { 4228 pio = dr->dr_parent->dr_zio; 4229 } 4230 4231 zbookmark_phys_t zb = { 4232 .zb_objset = dmu_objset_id(dn->dn_objset), 4233 .zb_object = dn->dn_object, 4234 .zb_level = 0, 4235 .zb_blkid = dr->dt.dll.dr_blkid, 4236 }; 4237 4238 /* 4239 * See comment in dbuf_write(). This is so that zio->io_bp_orig 4240 * will have the old BP in dbuf_lightweight_done(). 4241 */ 4242 dr->dr_bp_copy = *dbuf_lightweight_bp(dr); 4243 4244 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset), 4245 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd, 4246 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd), 4247 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL, 4248 dbuf_lightweight_physdone, dbuf_lightweight_done, dr, 4249 ZIO_PRIORITY_ASYNC_WRITE, 4250 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb); 4251 4252 zio_nowait(dr->dr_zio); 4253 } 4254 4255 /* 4256 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is 4257 * critical that we not allow the compiler to inline this function into 4258 * dbuf_sync_list() thereby drastically bloating the stack usage. 4259 */ 4260 noinline static void 4261 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4262 { 4263 arc_buf_t **datap = &dr->dt.dl.dr_data; 4264 dmu_buf_impl_t *db = dr->dr_dbuf; 4265 dnode_t *dn = dr->dr_dnode; 4266 objset_t *os; 4267 uint64_t txg = tx->tx_txg; 4268 4269 ASSERT(dmu_tx_is_syncing(tx)); 4270 4271 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 4272 4273 mutex_enter(&db->db_mtx); 4274 /* 4275 * To be synced, we must be dirtied. But we 4276 * might have been freed after the dirty.
4277 */ 4278 if (db->db_state == DB_UNCACHED) { 4279 /* This buffer has been freed since it was dirtied */ 4280 ASSERT(db->db.db_data == NULL); 4281 } else if (db->db_state == DB_FILL) { 4282 /* This buffer was freed and is now being re-filled */ 4283 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 4284 } else { 4285 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 4286 } 4287 DBUF_VERIFY(db); 4288 4289 if (db->db_blkid == DMU_SPILL_BLKID) { 4290 mutex_enter(&dn->dn_mtx); 4291 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { 4292 /* 4293 * In the previous transaction group, the bonus buffer 4294 * was entirely used to store the attributes for the 4295 * dnode which overrode the dn_spill field. However, 4296 * when adding more attributes to the file a spill 4297 * block was required to hold the extra attributes. 4298 * 4299 * Make sure to clear the garbage left in the dn_spill 4300 * field from the previous attributes in the bonus 4301 * buffer. Otherwise, after writing out the spill 4302 * block to the newly allocated dva, it will free 4303 * the old block pointed to by the invalid dn_spill. 4304 */ 4305 db->db_blkptr = NULL; 4306 } 4307 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 4308 mutex_exit(&dn->dn_mtx); 4309 } 4310 4311 /* 4312 * If this is a bonus buffer, simply copy the bonus data into the 4313 * dnode. It will be written out when the dnode is synced (and it 4314 * will be synced, since it must have been dirty for dbuf_sync to 4315 * be called). 4316 */ 4317 if (db->db_blkid == DMU_BONUS_BLKID) { 4318 ASSERT(dr->dr_dbuf == db); 4319 dbuf_sync_bonus(dr, tx); 4320 return; 4321 } 4322 4323 os = dn->dn_objset; 4324 4325 /* 4326 * This function may have dropped the db_mtx lock allowing a dmu_sync 4327 * operation to sneak in. As a result, we need to ensure that we 4328 * don't check the dr_override_state until we have returned from 4329 * dbuf_check_blkptr. 4330 */ 4331 dbuf_check_blkptr(dn, db); 4332 4333 /* 4334 * If this buffer is in the middle of an immediate write, 4335 * wait for the synchronous IO to complete. 4336 */ 4337 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 4338 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 4339 cv_wait(&db->db_changed, &db->db_mtx); 4340 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 4341 } 4342 4343 /* 4344 * If this is a dnode block, ensure it is appropriately encrypted 4345 * or decrypted, depending on what we are writing to it this txg. 4346 */ 4347 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT) 4348 dbuf_prepare_encrypted_dnode_leaf(dr); 4349 4350 if (db->db_state != DB_NOFILL && 4351 dn->dn_object != DMU_META_DNODE_OBJECT && 4352 zfs_refcount_count(&db->db_holds) > 1 && 4353 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 4354 *datap == db->db_buf) { 4355 /* 4356 * If this buffer is currently "in use" (i.e., there 4357 * are active holds and db_data still references it), 4358 * then make a copy before we start the write so that 4359 * any modifications from the open txg will not leak 4360 * into this write. 4361 * 4362 * NOTE: this copy does not need to be made for 4363 * objects only modified in the syncing context (e.g. 4364 * meta-dnode blocks).
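 * When the copy is made, *datap (the freshly allocated buffer) is what
 * this txg writes to disk, while db_buf stays attached to the dbuf for
 * open-context callers to keep modifying.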
4365 */ 4366 *datap = dbuf_alloc_arcbuf_from_arcbuf(db, db->db_buf); 4367 bcopy(db->db.db_data, (*datap)->b_data, arc_buf_size(*datap)); 4368 } 4369 db->db_data_pending = dr; 4370 4371 mutex_exit(&db->db_mtx); 4372 4373 dbuf_write(dr, *datap, tx); 4374 4375 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4376 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 4377 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr); 4378 } else { 4379 zio_nowait(dr->dr_zio); 4380 } 4381 } 4382 4383 void 4384 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 4385 { 4386 dbuf_dirty_record_t *dr; 4387 4388 while ((dr = list_head(list))) { 4389 if (dr->dr_zio != NULL) { 4390 /* 4391 * If we find an already initialized zio then we 4392 * are processing the meta-dnode, and we have finished. 4393 * The dbufs for all dnodes are put back on the list 4394 * during processing, so that we can zio_wait() 4395 * these IOs after initiating all child IOs. 4396 */ 4397 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 4398 DMU_META_DNODE_OBJECT); 4399 break; 4400 } 4401 list_remove(list, dr); 4402 if (dr->dr_dbuf == NULL) { 4403 dbuf_sync_lightweight(dr, tx); 4404 } else { 4405 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 4406 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 4407 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 4408 } 4409 if (dr->dr_dbuf->db_level > 0) 4410 dbuf_sync_indirect(dr, tx); 4411 else 4412 dbuf_sync_leaf(dr, tx); 4413 } 4414 } 4415 } 4416 4417 /* ARGSUSED */ 4418 static void 4419 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 4420 { 4421 dmu_buf_impl_t *db = vdb; 4422 dnode_t *dn; 4423 blkptr_t *bp = zio->io_bp; 4424 blkptr_t *bp_orig = &zio->io_bp_orig; 4425 spa_t *spa = zio->io_spa; 4426 int64_t delta; 4427 uint64_t fill = 0; 4428 int i; 4429 4430 ASSERT3P(db->db_blkptr, !=, NULL); 4431 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 4432 4433 DB_DNODE_ENTER(db); 4434 dn = DB_DNODE(db); 4435 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 4436 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 4437 zio->io_prev_space_delta = delta; 4438 4439 if (bp->blk_birth != 0) { 4440 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 4441 BP_GET_TYPE(bp) == dn->dn_type) || 4442 (db->db_blkid == DMU_SPILL_BLKID && 4443 BP_GET_TYPE(bp) == dn->dn_bonustype) || 4444 BP_IS_EMBEDDED(bp)); 4445 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 4446 } 4447 4448 mutex_enter(&db->db_mtx); 4449 4450 #ifdef ZFS_DEBUG 4451 if (db->db_blkid == DMU_SPILL_BLKID) { 4452 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 4453 ASSERT(!(BP_IS_HOLE(bp)) && 4454 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 4455 } 4456 #endif 4457 4458 if (db->db_level == 0) { 4459 mutex_enter(&dn->dn_mtx); 4460 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 4461 db->db_blkid != DMU_SPILL_BLKID) { 4462 ASSERT0(db->db_objset->os_raw_receive); 4463 dn->dn_phys->dn_maxblkid = db->db_blkid; 4464 } 4465 mutex_exit(&dn->dn_mtx); 4466 4467 if (dn->dn_type == DMU_OT_DNODE) { 4468 i = 0; 4469 while (i < db->db.db_size) { 4470 dnode_phys_t *dnp = 4471 (void *)(((char *)db->db.db_data) + i); 4472 4473 i += DNODE_MIN_SIZE; 4474 if (dnp->dn_type != DMU_OT_NONE) { 4475 fill++; 4476 i += dnp->dn_extra_slots * 4477 DNODE_MIN_SIZE; 4478 } 4479 } 4480 } else { 4481 if (BP_IS_HOLE(bp)) { 4482 fill = 0; 4483 } else { 4484 fill = 1; 4485 } 4486 } 4487 } else { 4488 blkptr_t *ibp = db->db.db_data; 4489 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 4490 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 4491 if 
(BP_IS_HOLE(ibp)) 4492 continue; 4493 fill += BP_GET_FILL(ibp); 4494 } 4495 } 4496 DB_DNODE_EXIT(db); 4497 4498 if (!BP_IS_EMBEDDED(bp)) 4499 BP_SET_FILL(bp, fill); 4500 4501 mutex_exit(&db->db_mtx); 4502 4503 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG); 4504 *db->db_blkptr = *bp; 4505 dmu_buf_unlock_parent(db, dblt, FTAG); 4506 } 4507 4508 /* ARGSUSED */ 4509 /* 4510 * This function gets called just prior to running through the compression 4511 * stage of the zio pipeline. If we're an indirect block comprised of only 4512 * holes, then we want this indirect to be compressed away to a hole. In 4513 * order to do that we must zero out any information about the holes that 4514 * this indirect points to before we try to compress it. 4515 */ 4516 static void 4517 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 4518 { 4519 dmu_buf_impl_t *db = vdb; 4520 dnode_t *dn; 4521 blkptr_t *bp; 4522 unsigned int epbs, i; 4523 4524 ASSERT3U(db->db_level, >, 0); 4525 DB_DNODE_ENTER(db); 4526 dn = DB_DNODE(db); 4527 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 4528 ASSERT3U(epbs, <, 31); 4529 4530 /* Determine if all our children are holes */ 4531 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { 4532 if (!BP_IS_HOLE(bp)) 4533 break; 4534 } 4535 4536 /* 4537 * If all the children are holes, then zero them all out so that 4538 * we may get compressed away. 4539 */ 4540 if (i == 1ULL << epbs) { 4541 /* 4542 * We only found holes. Grab the rwlock to prevent 4543 * anybody from reading the blocks we're about to 4544 * zero out. 4545 */ 4546 rw_enter(&db->db_rwlock, RW_WRITER); 4547 bzero(db->db.db_data, db->db.db_size); 4548 rw_exit(&db->db_rwlock); 4549 } 4550 DB_DNODE_EXIT(db); 4551 } 4552 4553 /* 4554 * The SPA will call this callback several times for each zio - once 4555 * for every physical child i/o (zio->io_phys_children times). This 4556 * allows the DMU to monitor the progress of each logical i/o. For example, 4557 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 4558 * block. There may be a long delay before all copies/fragments are completed, 4559 * so this callback allows us to retire dirty space gradually, as the physical 4560 * i/os complete. 4561 */ 4562 /* ARGSUSED */ 4563 static void 4564 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 4565 { 4566 dmu_buf_impl_t *db = arg; 4567 objset_t *os = db->db_objset; 4568 dsl_pool_t *dp = dmu_objset_pool(os); 4569 dbuf_dirty_record_t *dr; 4570 int delta = 0; 4571 4572 dr = db->db_data_pending; 4573 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 4574 4575 /* 4576 * The callback will be called io_phys_children times. Retire one 4577 * portion of our dirty space each time we are called. Any rounding 4578 * error will be cleaned up by dbuf_write_done(). 4579 */ 4580 delta = dr->dr_accounted / zio->io_phys_children; 4581 dsl_pool_undirty_space(dp, delta, zio->io_txg); 4582 } 4583 4584 /* ARGSUSED */ 4585 static void 4586 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 4587 { 4588 dmu_buf_impl_t *db = vdb; 4589 blkptr_t *bp_orig = &zio->io_bp_orig; 4590 blkptr_t *bp = db->db_blkptr; 4591 objset_t *os = db->db_objset; 4592 dmu_tx_t *tx = os->os_synctx; 4593 4594 ASSERT0(zio->io_error); 4595 ASSERT(db->db_blkptr == bp); 4596 4597 /* 4598 * For nopwrites and rewrites we ensure that the bp matches our 4599 * original and bypass all the accounting.
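 * In that case the block on disk is unchanged, so there is nothing to
 * kill or to declare newly born against the dataset.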
4600 */ 4601 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 4602 ASSERT(BP_EQUAL(bp, bp_orig)); 4603 } else { 4604 dsl_dataset_t *ds = os->os_dsl_dataset; 4605 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 4606 dsl_dataset_block_born(ds, bp, tx); 4607 } 4608 4609 mutex_enter(&db->db_mtx); 4610 4611 DBUF_VERIFY(db); 4612 4613 dbuf_dirty_record_t *dr = db->db_data_pending; 4614 dnode_t *dn = dr->dr_dnode; 4615 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4616 ASSERT(dr->dr_dbuf == db); 4617 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 4618 list_remove(&db->db_dirty_records, dr); 4619 4620 #ifdef ZFS_DEBUG 4621 if (db->db_blkid == DMU_SPILL_BLKID) { 4622 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 4623 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 4624 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 4625 } 4626 #endif 4627 4628 if (db->db_level == 0) { 4629 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 4630 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 4631 if (db->db_state != DB_NOFILL) { 4632 if (dr->dt.dl.dr_data != db->db_buf) 4633 arc_buf_destroy(dr->dt.dl.dr_data, db); 4634 } 4635 } else { 4636 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 4637 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 4638 if (!BP_IS_HOLE(db->db_blkptr)) { 4639 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift - 4640 SPA_BLKPTRSHIFT; 4641 ASSERT3U(db->db_blkid, <=, 4642 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 4643 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 4644 db->db.db_size); 4645 } 4646 mutex_destroy(&dr->dt.di.dr_mtx); 4647 list_destroy(&dr->dt.di.dr_children); 4648 } 4649 4650 cv_broadcast(&db->db_changed); 4651 ASSERT(db->db_dirtycnt > 0); 4652 db->db_dirtycnt -= 1; 4653 db->db_data_pending = NULL; 4654 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 4655 4656 /* 4657 * If we didn't do a physical write in this ZIO and we 4658 * still ended up here, it means that the space of the 4659 * dbuf that we just released (and undirtied) above hasn't 4660 * been marked as undirtied in the pool's accounting. 4661 * 4662 * Thus, we undirty that space in the pool's view of the 4663 * world here. For physical writes this type of update 4664 * happens in dbuf_write_physdone(). 4665 * 4666 * If we did a physical write, cleanup any rounding errors 4667 * that came up due to writing multiple copies of a block 4668 * on disk [see dbuf_write_physdone()]. 
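 * (Hypothetical numbers: with dr_accounted of 10 bytes and 3 physical
 * children, dbuf_write_physdone() retires 3 bytes per call and the
 * leftover 10 % 3 = 1 byte is retired here.)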
4669 */ 4670 if (zio->io_phys_children == 0) { 4671 dsl_pool_undirty_space(dmu_objset_pool(os), 4672 dr->dr_accounted, zio->io_txg); 4673 } else { 4674 dsl_pool_undirty_space(dmu_objset_pool(os), 4675 dr->dr_accounted % zio->io_phys_children, zio->io_txg); 4676 } 4677 4678 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 4679 } 4680 4681 static void 4682 dbuf_write_nofill_ready(zio_t *zio) 4683 { 4684 dbuf_write_ready(zio, NULL, zio->io_private); 4685 } 4686 4687 static void 4688 dbuf_write_nofill_done(zio_t *zio) 4689 { 4690 dbuf_write_done(zio, NULL, zio->io_private); 4691 } 4692 4693 static void 4694 dbuf_write_override_ready(zio_t *zio) 4695 { 4696 dbuf_dirty_record_t *dr = zio->io_private; 4697 dmu_buf_impl_t *db = dr->dr_dbuf; 4698 4699 dbuf_write_ready(zio, NULL, db); 4700 } 4701 4702 static void 4703 dbuf_write_override_done(zio_t *zio) 4704 { 4705 dbuf_dirty_record_t *dr = zio->io_private; 4706 dmu_buf_impl_t *db = dr->dr_dbuf; 4707 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 4708 4709 mutex_enter(&db->db_mtx); 4710 if (!BP_EQUAL(zio->io_bp, obp)) { 4711 if (!BP_IS_HOLE(obp)) 4712 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 4713 arc_release(dr->dt.dl.dr_data, db); 4714 } 4715 mutex_exit(&db->db_mtx); 4716 4717 dbuf_write_done(zio, NULL, db); 4718 4719 if (zio->io_abd != NULL) 4720 abd_free(zio->io_abd); 4721 } 4722 4723 typedef struct dbuf_remap_impl_callback_arg { 4724 objset_t *drica_os; 4725 uint64_t drica_blk_birth; 4726 dmu_tx_t *drica_tx; 4727 } dbuf_remap_impl_callback_arg_t; 4728 4729 static void 4730 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 4731 void *arg) 4732 { 4733 dbuf_remap_impl_callback_arg_t *drica = arg; 4734 objset_t *os = drica->drica_os; 4735 spa_t *spa = dmu_objset_spa(os); 4736 dmu_tx_t *tx = drica->drica_tx; 4737 4738 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4739 4740 if (os == spa_meta_objset(spa)) { 4741 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 4742 } else { 4743 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 4744 size, drica->drica_blk_birth, tx); 4745 } 4746 } 4747 4748 static void 4749 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) 4750 { 4751 blkptr_t bp_copy = *bp; 4752 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4753 dbuf_remap_impl_callback_arg_t drica; 4754 4755 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4756 4757 drica.drica_os = dn->dn_objset; 4758 drica.drica_blk_birth = bp->blk_birth; 4759 drica.drica_tx = tx; 4760 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 4761 &drica)) { 4762 /* 4763 * If the blkptr being remapped is tracked by a livelist, 4764 * then we need to make sure the livelist reflects the update. 4765 * First, cancel out the old blkptr by appending a 'FREE' 4766 * entry. Next, add an 'ALLOC' to track the new version. This 4767 * way we avoid trying to free an inaccurate blkptr at delete. 4768 * Note that embedded blkptrs are not tracked in livelists. 
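 *
 * Schematically, the two appends below leave the pending lists with:
 *
 *	FREE  <original bp>	cancels the old livelist entry
 *	ALLOC <remapped bp>	tracks the new location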
4769 */ 4770 if (dn->dn_objset != spa_meta_objset(spa)) { 4771 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset); 4772 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && 4773 bp->blk_birth > ds->ds_dir->dd_origin_txg) { 4774 ASSERT(!BP_IS_EMBEDDED(bp)); 4775 ASSERT(dsl_dir_is_clone(ds->ds_dir)); 4776 ASSERT(spa_feature_is_enabled(spa, 4777 SPA_FEATURE_LIVELIST)); 4778 bplist_append(&ds->ds_dir->dd_pending_frees, 4779 bp); 4780 bplist_append(&ds->ds_dir->dd_pending_allocs, 4781 &bp_copy); 4782 } 4783 } 4784 4785 /* 4786 * The db_rwlock prevents dbuf_read_impl() from 4787 * dereferencing the BP while we are changing it. To 4788 * avoid lock contention, only grab it when we are actually 4789 * changing the BP. 4790 */ 4791 if (rw != NULL) 4792 rw_enter(rw, RW_WRITER); 4793 *bp = bp_copy; 4794 if (rw != NULL) 4795 rw_exit(rw); 4796 } 4797 } 4798 4799 /* 4800 * Remap any existing BP's to concrete vdevs, if possible. 4801 */ 4802 static void 4803 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 4804 { 4805 spa_t *spa = dmu_objset_spa(db->db_objset); 4806 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4807 4808 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 4809 return; 4810 4811 if (db->db_level > 0) { 4812 blkptr_t *bp = db->db.db_data; 4813 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 4814 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx); 4815 } 4816 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 4817 dnode_phys_t *dnp = db->db.db_data; 4818 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 4819 DMU_OT_DNODE); 4820 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; 4821 i += dnp[i].dn_extra_slots + 1) { 4822 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 4823 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL : 4824 &dn->dn_dbuf->db_rwlock); 4825 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock, 4826 tx); 4827 } 4828 } 4829 } 4830 } 4831 4832 4833 /* Issue I/O to commit a dirty buffer to disk. */ 4834 static void 4835 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 4836 { 4837 dmu_buf_impl_t *db = dr->dr_dbuf; 4838 dnode_t *dn = dr->dr_dnode; 4839 objset_t *os; 4840 dmu_buf_impl_t *parent = db->db_parent; 4841 uint64_t txg = tx->tx_txg; 4842 zbookmark_phys_t zb; 4843 zio_prop_t zp; 4844 zio_t *pio; /* parent I/O */ 4845 int wp_flag = 0; 4846 4847 ASSERT(dmu_tx_is_syncing(tx)); 4848 4849 os = dn->dn_objset; 4850 4851 if (db->db_state != DB_NOFILL) { 4852 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 4853 /* 4854 * Private object buffers are released here rather 4855 * than in dbuf_dirty() since they are only modified 4856 * in the syncing context and we don't want the 4857 * overhead of making multiple copies of the data. 4858 */ 4859 if (BP_IS_HOLE(db->db_blkptr)) { 4860 arc_buf_thaw(data); 4861 } else { 4862 dbuf_release_bp(db); 4863 } 4864 dbuf_remap(dn, db, tx); 4865 } 4866 } 4867 4868 if (parent != dn->dn_dbuf) { 4869 /* Our parent is an indirect block. */ 4870 /* We have a dirty parent that has been scheduled for write. */ 4871 ASSERT(parent && parent->db_data_pending); 4872 /* Our parent's buffer is one level closer to the dnode. */ 4873 ASSERT(db->db_level == parent->db_level-1); 4874 /* 4875 * We're about to modify our parent's db_data by modifying 4876 * our block pointer, so the parent must be released. 4877 */ 4878 ASSERT(arc_released(parent->db_buf)); 4879 pio = parent->db_data_pending->dr_zio; 4880 } else { 4881 /* Our parent is the dnode itself. 
*/ 4882 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 4883 db->db_blkid != DMU_SPILL_BLKID) || 4884 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 4885 if (db->db_blkid != DMU_SPILL_BLKID) 4886 ASSERT3P(db->db_blkptr, ==, 4887 &dn->dn_phys->dn_blkptr[db->db_blkid]); 4888 pio = dn->dn_zio; 4889 } 4890 4891 ASSERT(db->db_level == 0 || data == db->db_buf); 4892 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 4893 ASSERT(pio); 4894 4895 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 4896 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 4897 db->db.db_object, db->db_level, db->db_blkid); 4898 4899 if (db->db_blkid == DMU_SPILL_BLKID) 4900 wp_flag = WP_SPILL; 4901 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 4902 4903 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 4904 4905 /* 4906 * We copy the blkptr now (rather than when we instantiate the dirty 4907 * record), because its value can change between open context and 4908 * syncing context. We do not need to hold dn_struct_rwlock to read 4909 * db_blkptr because we are in syncing context. 4910 */ 4911 dr->dr_bp_copy = *db->db_blkptr; 4912 4913 if (db->db_level == 0 && 4914 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 4915 /* 4916 * The BP for this block has been provided by open context 4917 * (by dmu_sync() or dmu_buf_write_embedded()). 4918 */ 4919 abd_t *contents = (data != NULL) ? 4920 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 4921 4922 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy, 4923 contents, db->db.db_size, db->db.db_size, &zp, 4924 dbuf_write_override_ready, NULL, NULL, 4925 dbuf_write_override_done, 4926 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 4927 mutex_enter(&db->db_mtx); 4928 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 4929 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 4930 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 4931 mutex_exit(&db->db_mtx); 4932 } else if (db->db_state == DB_NOFILL) { 4933 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 4934 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 4935 dr->dr_zio = zio_write(pio, os->os_spa, txg, 4936 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 4937 dbuf_write_nofill_ready, NULL, NULL, 4938 dbuf_write_nofill_done, db, 4939 ZIO_PRIORITY_ASYNC_WRITE, 4940 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 4941 } else { 4942 ASSERT(arc_released(data)); 4943 4944 /* 4945 * For indirect blocks, we want to setup the children 4946 * ready callback so that we can properly handle an indirect 4947 * block that only contains holes. 
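 * dbuf_write_children_ready() zeroes the child block pointers when all
 * of them are holes, which lets the compression stage collapse this
 * indirect block into a hole as well.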
4948 */ 4949 arc_write_done_func_t *children_ready_cb = NULL; 4950 if (db->db_level != 0) 4951 children_ready_cb = dbuf_write_children_ready; 4952 4953 dr->dr_zio = arc_write(pio, os->os_spa, txg, 4954 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 4955 &zp, dbuf_write_ready, 4956 children_ready_cb, dbuf_write_physdone, 4957 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, 4958 ZIO_FLAG_MUSTSUCCEED, &zb); 4959 } 4960 } 4961 4962 EXPORT_SYMBOL(dbuf_find); 4963 EXPORT_SYMBOL(dbuf_is_metadata); 4964 EXPORT_SYMBOL(dbuf_destroy); 4965 EXPORT_SYMBOL(dbuf_loan_arcbuf); 4966 EXPORT_SYMBOL(dbuf_whichblock); 4967 EXPORT_SYMBOL(dbuf_read); 4968 EXPORT_SYMBOL(dbuf_unoverride); 4969 EXPORT_SYMBOL(dbuf_free_range); 4970 EXPORT_SYMBOL(dbuf_new_size); 4971 EXPORT_SYMBOL(dbuf_release_bp); 4972 EXPORT_SYMBOL(dbuf_dirty); 4973 EXPORT_SYMBOL(dmu_buf_set_crypt_params); 4974 EXPORT_SYMBOL(dmu_buf_will_dirty); 4975 EXPORT_SYMBOL(dmu_buf_is_dirty); 4976 EXPORT_SYMBOL(dmu_buf_will_not_fill); 4977 EXPORT_SYMBOL(dmu_buf_will_fill); 4978 EXPORT_SYMBOL(dmu_buf_fill_done); 4979 EXPORT_SYMBOL(dmu_buf_rele); 4980 EXPORT_SYMBOL(dbuf_assign_arcbuf); 4981 EXPORT_SYMBOL(dbuf_prefetch); 4982 EXPORT_SYMBOL(dbuf_hold_impl); 4983 EXPORT_SYMBOL(dbuf_hold); 4984 EXPORT_SYMBOL(dbuf_hold_level); 4985 EXPORT_SYMBOL(dbuf_create_bonus); 4986 EXPORT_SYMBOL(dbuf_spill_set_blksz); 4987 EXPORT_SYMBOL(dbuf_rm_spill); 4988 EXPORT_SYMBOL(dbuf_add_ref); 4989 EXPORT_SYMBOL(dbuf_rele); 4990 EXPORT_SYMBOL(dbuf_rele_and_unlock); 4991 EXPORT_SYMBOL(dbuf_refcount); 4992 EXPORT_SYMBOL(dbuf_sync_list); 4993 EXPORT_SYMBOL(dmu_buf_set_user); 4994 EXPORT_SYMBOL(dmu_buf_set_user_ie); 4995 EXPORT_SYMBOL(dmu_buf_get_user); 4996 EXPORT_SYMBOL(dmu_buf_get_blkptr); 4997 4998 /* BEGIN CSTYLED */ 4999 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW, 5000 "Maximum size in bytes of the dbuf cache."); 5001 5002 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW, 5003 "Percentage over dbuf_cache_max_bytes when dbufs must be evicted " 5004 "directly."); 5005 5006 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW, 5007 "Percentage below dbuf_cache_max_bytes when the evict thread stops " 5008 "evicting dbufs."); 5009 5010 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW, 5011 "Maximum size in bytes of the dbuf metadata cache."); 5012 5013 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW, 5014 "Set the size of the dbuf cache to a log2 fraction of arc size."); 5015 5016 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW, 5017 "Set the size of the dbuf metadata cache to a log2 fraction of arc " 5018 "size."); 5019 /* END CSTYLED */ 5020