1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved. 25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright (c) 2019, Klara Inc. 28 * Copyright (c) 2019, Allan Jude 29 */ 30 31 #include <sys/zfs_context.h> 32 #include <sys/arc.h> 33 #include <sys/dmu.h> 34 #include <sys/dmu_send.h> 35 #include <sys/dmu_impl.h> 36 #include <sys/dbuf.h> 37 #include <sys/dmu_objset.h> 38 #include <sys/dsl_dataset.h> 39 #include <sys/dsl_dir.h> 40 #include <sys/dmu_tx.h> 41 #include <sys/spa.h> 42 #include <sys/zio.h> 43 #include <sys/dmu_zfetch.h> 44 #include <sys/sa.h> 45 #include <sys/sa_impl.h> 46 #include <sys/zfeature.h> 47 #include <sys/blkptr.h> 48 #include <sys/range_tree.h> 49 #include <sys/trace_zfs.h> 50 #include <sys/callb.h> 51 #include <sys/abd.h> 52 #include <sys/vdev.h> 53 #include <cityhash.h> 54 #include <sys/spa_impl.h> 55 #include <sys/wmsum.h> 56 #include <sys/vdev_impl.h> 57 58 static kstat_t *dbuf_ksp; 59 60 typedef struct dbuf_stats { 61 /* 62 * Various statistics about the size of the dbuf cache. 63 */ 64 kstat_named_t cache_count; 65 kstat_named_t cache_size_bytes; 66 kstat_named_t cache_size_bytes_max; 67 /* 68 * Statistics regarding the bounds on the dbuf cache size. 69 */ 70 kstat_named_t cache_target_bytes; 71 kstat_named_t cache_lowater_bytes; 72 kstat_named_t cache_hiwater_bytes; 73 /* 74 * Total number of dbuf cache evictions that have occurred. 75 */ 76 kstat_named_t cache_total_evicts; 77 /* 78 * The distribution of dbuf levels in the dbuf cache and 79 * the total size of all dbufs at each level. 80 */ 81 kstat_named_t cache_levels[DN_MAX_LEVELS]; 82 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS]; 83 /* 84 * Statistics about the dbuf hash table. 85 */ 86 kstat_named_t hash_hits; 87 kstat_named_t hash_misses; 88 kstat_named_t hash_collisions; 89 kstat_named_t hash_elements; 90 kstat_named_t hash_elements_max; 91 /* 92 * Number of sublists containing more than one dbuf in the dbuf 93 * hash table. Keep track of the longest hash chain. 94 */ 95 kstat_named_t hash_chains; 96 kstat_named_t hash_chain_max; 97 /* 98 * Number of times a dbuf_create() discovers that a dbuf was 99 * already created and in the dbuf hash table. 100 */ 101 kstat_named_t hash_insert_race; 102 /* 103 * Number of entries in the hash table dbuf and mutex arrays. 104 */ 105 kstat_named_t hash_table_count; 106 kstat_named_t hash_mutex_count; 107 /* 108 * Statistics about the size of the metadata dbuf cache. 
109 */ 110 kstat_named_t metadata_cache_count; 111 kstat_named_t metadata_cache_size_bytes; 112 kstat_named_t metadata_cache_size_bytes_max; 113 /* 114 * For diagnostic purposes, this is incremented whenever we can't add 115 * something to the metadata cache because it's full, and instead put 116 * the data in the regular dbuf cache. 117 */ 118 kstat_named_t metadata_cache_overflow; 119 } dbuf_stats_t; 120 121 dbuf_stats_t dbuf_stats = { 122 { "cache_count", KSTAT_DATA_UINT64 }, 123 { "cache_size_bytes", KSTAT_DATA_UINT64 }, 124 { "cache_size_bytes_max", KSTAT_DATA_UINT64 }, 125 { "cache_target_bytes", KSTAT_DATA_UINT64 }, 126 { "cache_lowater_bytes", KSTAT_DATA_UINT64 }, 127 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 }, 128 { "cache_total_evicts", KSTAT_DATA_UINT64 }, 129 { { "cache_levels_N", KSTAT_DATA_UINT64 } }, 130 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } }, 131 { "hash_hits", KSTAT_DATA_UINT64 }, 132 { "hash_misses", KSTAT_DATA_UINT64 }, 133 { "hash_collisions", KSTAT_DATA_UINT64 }, 134 { "hash_elements", KSTAT_DATA_UINT64 }, 135 { "hash_elements_max", KSTAT_DATA_UINT64 }, 136 { "hash_chains", KSTAT_DATA_UINT64 }, 137 { "hash_chain_max", KSTAT_DATA_UINT64 }, 138 { "hash_insert_race", KSTAT_DATA_UINT64 }, 139 { "hash_table_count", KSTAT_DATA_UINT64 }, 140 { "hash_mutex_count", KSTAT_DATA_UINT64 }, 141 { "metadata_cache_count", KSTAT_DATA_UINT64 }, 142 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 }, 143 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 }, 144 { "metadata_cache_overflow", KSTAT_DATA_UINT64 } 145 }; 146 147 struct { 148 wmsum_t cache_count; 149 wmsum_t cache_total_evicts; 150 wmsum_t cache_levels[DN_MAX_LEVELS]; 151 wmsum_t cache_levels_bytes[DN_MAX_LEVELS]; 152 wmsum_t hash_hits; 153 wmsum_t hash_misses; 154 wmsum_t hash_collisions; 155 wmsum_t hash_chains; 156 wmsum_t hash_insert_race; 157 wmsum_t metadata_cache_count; 158 wmsum_t metadata_cache_overflow; 159 } dbuf_sums; 160 161 #define DBUF_STAT_INCR(stat, val) \ 162 wmsum_add(&dbuf_sums.stat, val); 163 #define DBUF_STAT_DECR(stat, val) \ 164 DBUF_STAT_INCR(stat, -(val)); 165 #define DBUF_STAT_BUMP(stat) \ 166 DBUF_STAT_INCR(stat, 1); 167 #define DBUF_STAT_BUMPDOWN(stat) \ 168 DBUF_STAT_INCR(stat, -1); 169 #define DBUF_STAT_MAX(stat, v) { \ 170 uint64_t _m; \ 171 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \ 172 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\ 173 continue; \ 174 } 175 176 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 177 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 178 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr); 179 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags); 180 181 /* 182 * Global data structures and functions for the dbuf cache. 183 */ 184 static kmem_cache_t *dbuf_kmem_cache; 185 static taskq_t *dbu_evict_taskq; 186 187 static kthread_t *dbuf_cache_evict_thread; 188 static kmutex_t dbuf_evict_lock; 189 static kcondvar_t dbuf_evict_cv; 190 static boolean_t dbuf_evict_thread_exit; 191 192 /* 193 * There are two dbuf caches; each dbuf can only be in one of them at a time. 194 * 195 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands 196 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs 197 * that represent the metadata that describes filesystems/snapshots/ 198 * bookmarks/properties/etc. 
We only evict from this cache when we export a 199 * pool, to short-circuit as much I/O as possible for all administrative 200 * commands that need the metadata. There is no eviction policy for this 201 * cache, because we try to only include types in it which would occupy a 202 * very small amount of space per object but create a large impact on the 203 * performance of these commands. Instead, after it reaches a maximum size 204 * (which should only happen on very small memory systems with a very large 205 * number of filesystem objects), we stop taking new dbufs into the 206 * metadata cache, instead putting them in the normal dbuf cache. 207 * 208 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that 209 * are not currently held but have been recently released. These dbufs 210 * are not eligible for arc eviction until they are aged out of the cache. 211 * Dbufs that are aged out of the cache will be immediately destroyed and 212 * become eligible for arc eviction. 213 * 214 * Dbufs are added to these caches once the last hold is released. If a dbuf is 215 * later accessed and still exists in the dbuf cache, then it will be removed 216 * from the cache and later re-added to the head of the cache. 217 * 218 * If a given dbuf meets the requirements for the metadata cache, it will go 219 * there, otherwise it will be considered for the generic LRU dbuf cache. The 220 * caches and the refcounts tracking their sizes are stored in an array indexed 221 * by those caches' matching enum values (from dbuf_cached_state_t). 222 */ 223 typedef struct dbuf_cache { 224 multilist_t cache; 225 zfs_refcount_t size ____cacheline_aligned; 226 } dbuf_cache_t; 227 dbuf_cache_t dbuf_caches[DB_CACHE_MAX]; 228 229 /* Size limits for the caches */ 230 static uint64_t dbuf_cache_max_bytes = UINT64_MAX; 231 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX; 232 233 /* Set the default sizes of the caches to log2 fraction of arc size */ 234 static uint_t dbuf_cache_shift = 5; 235 static uint_t dbuf_metadata_cache_shift = 6; 236 237 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */ 238 static uint_t dbuf_mutex_cache_shift = 0; 239 240 static unsigned long dbuf_cache_target_bytes(void); 241 static unsigned long dbuf_metadata_cache_target_bytes(void); 242 243 /* 244 * The LRU dbuf cache uses a three-stage eviction policy: 245 * - A low water marker designates when the dbuf eviction thread 246 * should stop evicting from the dbuf cache. 247 * - When we reach the maximum size (aka mid water mark), we 248 * signal the eviction thread to run. 249 * - The high water mark indicates when the eviction thread 250 * is unable to keep up with the incoming load and eviction must 251 * happen in the context of the calling thread. 252 * 253 * The dbuf cache: 254 * (max size) 255 * low water mid water hi water 256 * +----------------------------------------+----------+----------+ 257 * | | | | 258 * | | | | 259 * | | | | 260 * | | | | 261 * +----------------------------------------+----------+----------+ 262 * stop signal evict 263 * evicting eviction directly 264 * thread 265 * 266 * The high and low water marks indicate the operating range for the eviction 267 * thread. The low water mark is, by default, 90% of the total size of the 268 * cache and the high water mark is at 110% (both of these percentages can be 269 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct, 270 * respectively). 
The eviction thread will try to ensure that the cache remains 271 * within this range by waking up every second and checking if the cache is 272 * above the low water mark. The thread can also be woken up by callers adding 273 * elements into the cache if the cache is larger than the mid water (i.e max 274 * cache size). Once the eviction thread is woken up and eviction is required, 275 * it will continue evicting buffers until it's able to reduce the cache size 276 * to the low water mark. If the cache size continues to grow and hits the high 277 * water mark, then callers adding elements to the cache will begin to evict 278 * directly from the cache until the cache is no longer above the high water 279 * mark. 280 */ 281 282 /* 283 * The percentage above and below the maximum cache size. 284 */ 285 static uint_t dbuf_cache_hiwater_pct = 10; 286 static uint_t dbuf_cache_lowater_pct = 10; 287 288 static int 289 dbuf_cons(void *vdb, void *unused, int kmflag) 290 { 291 (void) unused, (void) kmflag; 292 dmu_buf_impl_t *db = vdb; 293 memset(db, 0, sizeof (dmu_buf_impl_t)); 294 295 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); 296 rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL); 297 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); 298 multilist_link_init(&db->db_cache_link); 299 zfs_refcount_create(&db->db_holds); 300 301 return (0); 302 } 303 304 static void 305 dbuf_dest(void *vdb, void *unused) 306 { 307 (void) unused; 308 dmu_buf_impl_t *db = vdb; 309 mutex_destroy(&db->db_mtx); 310 rw_destroy(&db->db_rwlock); 311 cv_destroy(&db->db_changed); 312 ASSERT(!multilist_link_active(&db->db_cache_link)); 313 zfs_refcount_destroy(&db->db_holds); 314 } 315 316 /* 317 * dbuf hash table routines 318 */ 319 static dbuf_hash_table_t dbuf_hash_table; 320 321 /* 322 * We use Cityhash for this. It's fast, and has good hash properties without 323 * requiring any large static buffers. 
324 */ 325 static uint64_t 326 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) 327 { 328 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid)); 329 } 330 331 #define DTRACE_SET_STATE(db, why) \ 332 DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \ 333 const char *, why) 334 335 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 336 ((dbuf)->db.db_object == (obj) && \ 337 (dbuf)->db_objset == (os) && \ 338 (dbuf)->db_level == (level) && \ 339 (dbuf)->db_blkid == (blkid)) 340 341 dmu_buf_impl_t * 342 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid) 343 { 344 dbuf_hash_table_t *h = &dbuf_hash_table; 345 uint64_t hv; 346 uint64_t idx; 347 dmu_buf_impl_t *db; 348 349 hv = dbuf_hash(os, obj, level, blkid); 350 idx = hv & h->hash_table_mask; 351 352 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 353 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { 354 if (DBUF_EQUAL(db, os, obj, level, blkid)) { 355 mutex_enter(&db->db_mtx); 356 if (db->db_state != DB_EVICTING) { 357 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 358 return (db); 359 } 360 mutex_exit(&db->db_mtx); 361 } 362 } 363 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 364 return (NULL); 365 } 366 367 static dmu_buf_impl_t * 368 dbuf_find_bonus(objset_t *os, uint64_t object) 369 { 370 dnode_t *dn; 371 dmu_buf_impl_t *db = NULL; 372 373 if (dnode_hold(os, object, FTAG, &dn) == 0) { 374 rw_enter(&dn->dn_struct_rwlock, RW_READER); 375 if (dn->dn_bonus != NULL) { 376 db = dn->dn_bonus; 377 mutex_enter(&db->db_mtx); 378 } 379 rw_exit(&dn->dn_struct_rwlock); 380 dnode_rele(dn, FTAG); 381 } 382 return (db); 383 } 384 385 /* 386 * Insert an entry into the hash table. If there is already an element 387 * equal to elem in the hash table, then the already existing element 388 * will be returned and the new element will not be inserted. 389 * Otherwise returns NULL. 390 */ 391 static dmu_buf_impl_t * 392 dbuf_hash_insert(dmu_buf_impl_t *db) 393 { 394 dbuf_hash_table_t *h = &dbuf_hash_table; 395 objset_t *os = db->db_objset; 396 uint64_t obj = db->db.db_object; 397 int level = db->db_level; 398 uint64_t blkid, hv, idx; 399 dmu_buf_impl_t *dbf; 400 uint32_t i; 401 402 blkid = db->db_blkid; 403 hv = dbuf_hash(os, obj, level, blkid); 404 idx = hv & h->hash_table_mask; 405 406 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 407 for (dbf = h->hash_table[idx], i = 0; dbf != NULL; 408 dbf = dbf->db_hash_next, i++) { 409 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { 410 mutex_enter(&dbf->db_mtx); 411 if (dbf->db_state != DB_EVICTING) { 412 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 413 return (dbf); 414 } 415 mutex_exit(&dbf->db_mtx); 416 } 417 } 418 419 if (i > 0) { 420 DBUF_STAT_BUMP(hash_collisions); 421 if (i == 1) 422 DBUF_STAT_BUMP(hash_chains); 423 424 DBUF_STAT_MAX(hash_chain_max, i); 425 } 426 427 mutex_enter(&db->db_mtx); 428 db->db_hash_next = h->hash_table[idx]; 429 h->hash_table[idx] = db; 430 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 431 uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64); 432 DBUF_STAT_MAX(hash_elements_max, he); 433 434 return (NULL); 435 } 436 437 /* 438 * This returns whether this dbuf should be stored in the metadata cache, which 439 * is based on whether it's from one of the dnode types that store data related 440 * to traversing dataset hierarchies. 
441 */ 442 static boolean_t 443 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db) 444 { 445 DB_DNODE_ENTER(db); 446 dmu_object_type_t type = DB_DNODE(db)->dn_type; 447 DB_DNODE_EXIT(db); 448 449 /* Check if this dbuf is one of the types we care about */ 450 if (DMU_OT_IS_METADATA_CACHED(type)) { 451 /* If we hit this, then we set something up wrong in dmu_ot */ 452 ASSERT(DMU_OT_IS_METADATA(type)); 453 454 /* 455 * Sanity check for small-memory systems: don't allocate too 456 * much memory for this purpose. 457 */ 458 if (zfs_refcount_count( 459 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) > 460 dbuf_metadata_cache_target_bytes()) { 461 DBUF_STAT_BUMP(metadata_cache_overflow); 462 return (B_FALSE); 463 } 464 465 return (B_TRUE); 466 } 467 468 return (B_FALSE); 469 } 470 471 /* 472 * Remove an entry from the hash table. It must be in the EVICTING state. 473 */ 474 static void 475 dbuf_hash_remove(dmu_buf_impl_t *db) 476 { 477 dbuf_hash_table_t *h = &dbuf_hash_table; 478 uint64_t hv, idx; 479 dmu_buf_impl_t *dbf, **dbp; 480 481 hv = dbuf_hash(db->db_objset, db->db.db_object, 482 db->db_level, db->db_blkid); 483 idx = hv & h->hash_table_mask; 484 485 /* 486 * We mustn't hold db_mtx to maintain lock ordering: 487 * DBUF_HASH_MUTEX > db_mtx. 488 */ 489 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 490 ASSERT(db->db_state == DB_EVICTING); 491 ASSERT(!MUTEX_HELD(&db->db_mtx)); 492 493 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 494 dbp = &h->hash_table[idx]; 495 while ((dbf = *dbp) != db) { 496 dbp = &dbf->db_hash_next; 497 ASSERT(dbf != NULL); 498 } 499 *dbp = db->db_hash_next; 500 db->db_hash_next = NULL; 501 if (h->hash_table[idx] && 502 h->hash_table[idx]->db_hash_next == NULL) 503 DBUF_STAT_BUMPDOWN(hash_chains); 504 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 505 atomic_dec_64(&dbuf_stats.hash_elements.value.ui64); 506 } 507 508 typedef enum { 509 DBVU_EVICTING, 510 DBVU_NOT_EVICTING 511 } dbvu_verify_type_t; 512 513 static void 514 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type) 515 { 516 #ifdef ZFS_DEBUG 517 int64_t holds; 518 519 if (db->db_user == NULL) 520 return; 521 522 /* Only data blocks support the attachment of user data. */ 523 ASSERT(db->db_level == 0); 524 525 /* Clients must resolve a dbuf before attaching user data. */ 526 ASSERT(db->db.db_data != NULL); 527 ASSERT3U(db->db_state, ==, DB_CACHED); 528 529 holds = zfs_refcount_count(&db->db_holds); 530 if (verify_type == DBVU_EVICTING) { 531 /* 532 * Immediate eviction occurs when holds == dirtycnt. 533 * For normal eviction buffers, holds is zero on 534 * eviction, except when dbuf_fix_old_data() calls 535 * dbuf_clear_data(). However, the hold count can grow 536 * during eviction even though db_mtx is held (see 537 * dmu_bonus_hold() for an example), so we can only 538 * test the generic invariant that holds >= dirtycnt. 539 */ 540 ASSERT3U(holds, >=, db->db_dirtycnt); 541 } else { 542 if (db->db_user_immediate_evict == TRUE) 543 ASSERT3U(holds, >=, db->db_dirtycnt); 544 else 545 ASSERT3U(holds, >, 0); 546 } 547 #endif 548 } 549 550 static void 551 dbuf_evict_user(dmu_buf_impl_t *db) 552 { 553 dmu_buf_user_t *dbu = db->db_user; 554 555 ASSERT(MUTEX_HELD(&db->db_mtx)); 556 557 if (dbu == NULL) 558 return; 559 560 dbuf_verify_user(db, DBVU_EVICTING); 561 db->db_user = NULL; 562 563 #ifdef ZFS_DEBUG 564 if (dbu->dbu_clear_on_evict_dbufp != NULL) 565 *dbu->dbu_clear_on_evict_dbufp = NULL; 566 #endif 567 568 /* 569 * There are two eviction callbacks - one that we call synchronously 570 * and one that we invoke via a taskq. 
	 * The async one is useful for avoiding lock order reversals and
	 * limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
	vdev_t *vd = NULL;
	zfs_cache_type_t cache = db->db_objset->os_secondary_cache;
	blkptr_t *bp = db->db_blkptr;

	if (bp != NULL && !BP_IS_HOLE(bp)) {
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (cache == ZFS_CACHE_ALL ||
		    (dbuf_is_metadata(db) && cache == ZFS_CACHE_METADATA)) {
			if (vd == NULL)
				return (B_TRUE);

			if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
			    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
			    l2arc_exclude_special == 0)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	vdev_t *vd = NULL;
	zfs_cache_type_t cache = dn->dn_objset->os_secondary_cache;

	if (bp != NULL && !BP_IS_HOLE(bp)) {
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (cache == ZFS_CACHE_ALL || ((level > 0 ||
		    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)) &&
		    cache == ZFS_CACHE_METADATA)) {
			if (vd == NULL)
				return (B_TRUE);

			if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
			    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
			    l2arc_exclude_special == 0)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
790 */ 791 static __attribute__((noreturn)) void 792 dbuf_evict_thread(void *unused) 793 { 794 (void) unused; 795 callb_cpr_t cpr; 796 797 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG); 798 799 mutex_enter(&dbuf_evict_lock); 800 while (!dbuf_evict_thread_exit) { 801 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 802 CALLB_CPR_SAFE_BEGIN(&cpr); 803 (void) cv_timedwait_idle_hires(&dbuf_evict_cv, 804 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0); 805 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock); 806 } 807 mutex_exit(&dbuf_evict_lock); 808 809 /* 810 * Keep evicting as long as we're above the low water mark 811 * for the cache. We do this without holding the locks to 812 * minimize lock contention. 813 */ 814 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 815 dbuf_evict_one(); 816 } 817 818 mutex_enter(&dbuf_evict_lock); 819 } 820 821 dbuf_evict_thread_exit = B_FALSE; 822 cv_broadcast(&dbuf_evict_cv); 823 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */ 824 thread_exit(); 825 } 826 827 /* 828 * Wake up the dbuf eviction thread if the dbuf cache is at its max size. 829 * If the dbuf cache is at its high water mark, then evict a dbuf from the 830 * dbuf cache using the caller's context. 831 */ 832 static void 833 dbuf_evict_notify(uint64_t size) 834 { 835 /* 836 * We check if we should evict without holding the dbuf_evict_lock, 837 * because it's OK to occasionally make the wrong decision here, 838 * and grabbing the lock results in massive lock contention. 839 */ 840 if (size > dbuf_cache_target_bytes()) { 841 if (size > dbuf_cache_hiwater_bytes()) 842 dbuf_evict_one(); 843 cv_signal(&dbuf_evict_cv); 844 } 845 } 846 847 static int 848 dbuf_kstat_update(kstat_t *ksp, int rw) 849 { 850 dbuf_stats_t *ds = ksp->ks_data; 851 dbuf_hash_table_t *h = &dbuf_hash_table; 852 853 if (rw == KSTAT_WRITE) 854 return (SET_ERROR(EACCES)); 855 856 ds->cache_count.value.ui64 = 857 wmsum_value(&dbuf_sums.cache_count); 858 ds->cache_size_bytes.value.ui64 = 859 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size); 860 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes(); 861 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes(); 862 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes(); 863 ds->cache_total_evicts.value.ui64 = 864 wmsum_value(&dbuf_sums.cache_total_evicts); 865 for (int i = 0; i < DN_MAX_LEVELS; i++) { 866 ds->cache_levels[i].value.ui64 = 867 wmsum_value(&dbuf_sums.cache_levels[i]); 868 ds->cache_levels_bytes[i].value.ui64 = 869 wmsum_value(&dbuf_sums.cache_levels_bytes[i]); 870 } 871 ds->hash_hits.value.ui64 = 872 wmsum_value(&dbuf_sums.hash_hits); 873 ds->hash_misses.value.ui64 = 874 wmsum_value(&dbuf_sums.hash_misses); 875 ds->hash_collisions.value.ui64 = 876 wmsum_value(&dbuf_sums.hash_collisions); 877 ds->hash_chains.value.ui64 = 878 wmsum_value(&dbuf_sums.hash_chains); 879 ds->hash_insert_race.value.ui64 = 880 wmsum_value(&dbuf_sums.hash_insert_race); 881 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1; 882 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1; 883 ds->metadata_cache_count.value.ui64 = 884 wmsum_value(&dbuf_sums.metadata_cache_count); 885 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count( 886 &dbuf_caches[DB_DBUF_METADATA_CACHE].size); 887 ds->metadata_cache_overflow.value.ui64 = 888 wmsum_value(&dbuf_sums.metadata_cache_overflow); 889 return (0); 890 } 891 892 void 893 dbuf_init(void) 894 { 895 uint64_t hmsize, hsize = 1ULL << 16; 896 dbuf_hash_table_t *h = 
	    &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets. A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void 1002 dbuf_fini(void) 1003 { 1004 dbuf_hash_table_t *h = &dbuf_hash_table; 1005 1006 dbuf_stats_destroy(); 1007 1008 for (int i = 0; i < (h->hash_mutex_mask + 1); i++) 1009 mutex_destroy(&h->hash_mutexes[i]); 1010 1011 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 1012 vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) * 1013 sizeof (kmutex_t)); 1014 1015 kmem_cache_destroy(dbuf_kmem_cache); 1016 taskq_destroy(dbu_evict_taskq); 1017 1018 mutex_enter(&dbuf_evict_lock); 1019 dbuf_evict_thread_exit = B_TRUE; 1020 while (dbuf_evict_thread_exit) { 1021 cv_signal(&dbuf_evict_cv); 1022 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 1023 } 1024 mutex_exit(&dbuf_evict_lock); 1025 1026 mutex_destroy(&dbuf_evict_lock); 1027 cv_destroy(&dbuf_evict_cv); 1028 1029 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) { 1030 zfs_refcount_destroy(&dbuf_caches[dcs].size); 1031 multilist_destroy(&dbuf_caches[dcs].cache); 1032 } 1033 1034 if (dbuf_ksp != NULL) { 1035 kstat_delete(dbuf_ksp); 1036 dbuf_ksp = NULL; 1037 } 1038 1039 wmsum_fini(&dbuf_sums.cache_count); 1040 wmsum_fini(&dbuf_sums.cache_total_evicts); 1041 for (int i = 0; i < DN_MAX_LEVELS; i++) { 1042 wmsum_fini(&dbuf_sums.cache_levels[i]); 1043 wmsum_fini(&dbuf_sums.cache_levels_bytes[i]); 1044 } 1045 wmsum_fini(&dbuf_sums.hash_hits); 1046 wmsum_fini(&dbuf_sums.hash_misses); 1047 wmsum_fini(&dbuf_sums.hash_collisions); 1048 wmsum_fini(&dbuf_sums.hash_chains); 1049 wmsum_fini(&dbuf_sums.hash_insert_race); 1050 wmsum_fini(&dbuf_sums.metadata_cache_count); 1051 wmsum_fini(&dbuf_sums.metadata_cache_overflow); 1052 } 1053 1054 /* 1055 * Other stuff. 1056 */ 1057 1058 #ifdef ZFS_DEBUG 1059 static void 1060 dbuf_verify(dmu_buf_impl_t *db) 1061 { 1062 dnode_t *dn; 1063 dbuf_dirty_record_t *dr; 1064 uint32_t txg_prev; 1065 1066 ASSERT(MUTEX_HELD(&db->db_mtx)); 1067 1068 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 1069 return; 1070 1071 ASSERT(db->db_objset != NULL); 1072 DB_DNODE_ENTER(db); 1073 dn = DB_DNODE(db); 1074 if (dn == NULL) { 1075 ASSERT(db->db_parent == NULL); 1076 ASSERT(db->db_blkptr == NULL); 1077 } else { 1078 ASSERT3U(db->db.db_object, ==, dn->dn_object); 1079 ASSERT3P(db->db_objset, ==, dn->dn_objset); 1080 ASSERT3U(db->db_level, <, dn->dn_nlevels); 1081 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 1082 db->db_blkid == DMU_SPILL_BLKID || 1083 !avl_is_empty(&dn->dn_dbufs)); 1084 } 1085 if (db->db_blkid == DMU_BONUS_BLKID) { 1086 ASSERT(dn != NULL); 1087 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 1088 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 1089 } else if (db->db_blkid == DMU_SPILL_BLKID) { 1090 ASSERT(dn != NULL); 1091 ASSERT0(db->db.db_offset); 1092 } else { 1093 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 1094 } 1095 1096 if ((dr = list_head(&db->db_dirty_records)) != NULL) { 1097 ASSERT(dr->dr_dbuf == db); 1098 txg_prev = dr->dr_txg; 1099 for (dr = list_next(&db->db_dirty_records, dr); dr != NULL; 1100 dr = list_next(&db->db_dirty_records, dr)) { 1101 ASSERT(dr->dr_dbuf == db); 1102 ASSERT(txg_prev > dr->dr_txg); 1103 txg_prev = dr->dr_txg; 1104 } 1105 } 1106 1107 /* 1108 * We can't assert that db_size matches dn_datablksz because it 1109 * can be momentarily different when another thread is doing 1110 * dnode_set_blksz(). 1111 */ 1112 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 1113 dr = db->db_data_pending; 1114 /* 1115 * It should only be modified in syncing context, so 1116 * make sure we only have one copy of the data. 
1117 */ 1118 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 1119 } 1120 1121 /* verify db->db_blkptr */ 1122 if (db->db_blkptr) { 1123 if (db->db_parent == dn->dn_dbuf) { 1124 /* db is pointed to by the dnode */ 1125 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 1126 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 1127 ASSERT(db->db_parent == NULL); 1128 else 1129 ASSERT(db->db_parent != NULL); 1130 if (db->db_blkid != DMU_SPILL_BLKID) 1131 ASSERT3P(db->db_blkptr, ==, 1132 &dn->dn_phys->dn_blkptr[db->db_blkid]); 1133 } else { 1134 /* db is pointed to by an indirect block */ 1135 int epb __maybe_unused = db->db_parent->db.db_size >> 1136 SPA_BLKPTRSHIFT; 1137 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 1138 ASSERT3U(db->db_parent->db.db_object, ==, 1139 db->db.db_object); 1140 /* 1141 * dnode_grow_indblksz() can make this fail if we don't 1142 * have the parent's rwlock. XXX indblksz no longer 1143 * grows. safe to do this now? 1144 */ 1145 if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) { 1146 ASSERT3P(db->db_blkptr, ==, 1147 ((blkptr_t *)db->db_parent->db.db_data + 1148 db->db_blkid % epb)); 1149 } 1150 } 1151 } 1152 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 1153 (db->db_buf == NULL || db->db_buf->b_data) && 1154 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 1155 db->db_state != DB_FILL && !dn->dn_free_txg) { 1156 /* 1157 * If the blkptr isn't set but they have nonzero data, 1158 * it had better be dirty, otherwise we'll lose that 1159 * data when we evict this buffer. 1160 * 1161 * There is an exception to this rule for indirect blocks; in 1162 * this case, if the indirect block is a hole, we fill in a few 1163 * fields on each of the child blocks (importantly, birth time) 1164 * to prevent hole birth times from being lost when you 1165 * partially fill in a hole. 1166 */ 1167 if (db->db_dirtycnt == 0) { 1168 if (db->db_level == 0) { 1169 uint64_t *buf = db->db.db_data; 1170 int i; 1171 1172 for (i = 0; i < db->db.db_size >> 3; i++) { 1173 ASSERT(buf[i] == 0); 1174 } 1175 } else { 1176 blkptr_t *bps = db->db.db_data; 1177 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 1178 db->db.db_size); 1179 /* 1180 * We want to verify that all the blkptrs in the 1181 * indirect block are holes, but we may have 1182 * automatically set up a few fields for them. 1183 * We iterate through each blkptr and verify 1184 * they only have those fields set. 
1185 */ 1186 for (int i = 0; 1187 i < db->db.db_size / sizeof (blkptr_t); 1188 i++) { 1189 blkptr_t *bp = &bps[i]; 1190 ASSERT(ZIO_CHECKSUM_IS_ZERO( 1191 &bp->blk_cksum)); 1192 ASSERT( 1193 DVA_IS_EMPTY(&bp->blk_dva[0]) && 1194 DVA_IS_EMPTY(&bp->blk_dva[1]) && 1195 DVA_IS_EMPTY(&bp->blk_dva[2])); 1196 ASSERT0(bp->blk_fill); 1197 ASSERT0(bp->blk_pad[0]); 1198 ASSERT0(bp->blk_pad[1]); 1199 ASSERT(!BP_IS_EMBEDDED(bp)); 1200 ASSERT(BP_IS_HOLE(bp)); 1201 ASSERT0(bp->blk_phys_birth); 1202 } 1203 } 1204 } 1205 } 1206 DB_DNODE_EXIT(db); 1207 } 1208 #endif 1209 1210 static void 1211 dbuf_clear_data(dmu_buf_impl_t *db) 1212 { 1213 ASSERT(MUTEX_HELD(&db->db_mtx)); 1214 dbuf_evict_user(db); 1215 ASSERT3P(db->db_buf, ==, NULL); 1216 db->db.db_data = NULL; 1217 if (db->db_state != DB_NOFILL) { 1218 db->db_state = DB_UNCACHED; 1219 DTRACE_SET_STATE(db, "clear data"); 1220 } 1221 } 1222 1223 static void 1224 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 1225 { 1226 ASSERT(MUTEX_HELD(&db->db_mtx)); 1227 ASSERT(buf != NULL); 1228 1229 db->db_buf = buf; 1230 ASSERT(buf->b_data != NULL); 1231 db->db.db_data = buf->b_data; 1232 } 1233 1234 static arc_buf_t * 1235 dbuf_alloc_arcbuf(dmu_buf_impl_t *db) 1236 { 1237 spa_t *spa = db->db_objset->os_spa; 1238 1239 return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size)); 1240 } 1241 1242 /* 1243 * Loan out an arc_buf for read. Return the loaned arc_buf. 1244 */ 1245 arc_buf_t * 1246 dbuf_loan_arcbuf(dmu_buf_impl_t *db) 1247 { 1248 arc_buf_t *abuf; 1249 1250 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1251 mutex_enter(&db->db_mtx); 1252 if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) { 1253 int blksz = db->db.db_size; 1254 spa_t *spa = db->db_objset->os_spa; 1255 1256 mutex_exit(&db->db_mtx); 1257 abuf = arc_loan_buf(spa, B_FALSE, blksz); 1258 memcpy(abuf->b_data, db->db.db_data, blksz); 1259 } else { 1260 abuf = db->db_buf; 1261 arc_loan_inuse_buf(abuf, db); 1262 db->db_buf = NULL; 1263 dbuf_clear_data(db); 1264 mutex_exit(&db->db_mtx); 1265 } 1266 return (abuf); 1267 } 1268 1269 /* 1270 * Calculate which level n block references the data at the level 0 offset 1271 * provided. 1272 */ 1273 uint64_t 1274 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset) 1275 { 1276 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 1277 /* 1278 * The level n blkid is equal to the level 0 blkid divided by 1279 * the number of level 0s in a level n block. 1280 * 1281 * The level 0 blkid is offset >> datablkshift = 1282 * offset / 2^datablkshift. 1283 * 1284 * The number of level 0s in a level n is the number of block 1285 * pointers in an indirect block, raised to the power of level. 1286 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 1287 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 
1288 * 1289 * Thus, the level n blkid is: offset / 1290 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT)))) 1291 * = offset / 2^(datablkshift + level * 1292 * (indblkshift - SPA_BLKPTRSHIFT)) 1293 * = offset >> (datablkshift + level * 1294 * (indblkshift - SPA_BLKPTRSHIFT)) 1295 */ 1296 1297 const unsigned exp = dn->dn_datablkshift + 1298 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT); 1299 1300 if (exp >= 8 * sizeof (offset)) { 1301 /* This only happens on the highest indirection level */ 1302 ASSERT3U(level, ==, dn->dn_nlevels - 1); 1303 return (0); 1304 } 1305 1306 ASSERT3U(exp, <, 8 * sizeof (offset)); 1307 1308 return (offset >> exp); 1309 } else { 1310 ASSERT3U(offset, <, dn->dn_datablksz); 1311 return (0); 1312 } 1313 } 1314 1315 /* 1316 * This function is used to lock the parent of the provided dbuf. This should be 1317 * used when modifying or reading db_blkptr. 1318 */ 1319 db_lock_type_t 1320 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag) 1321 { 1322 enum db_lock_type ret = DLT_NONE; 1323 if (db->db_parent != NULL) { 1324 rw_enter(&db->db_parent->db_rwlock, rw); 1325 ret = DLT_PARENT; 1326 } else if (dmu_objset_ds(db->db_objset) != NULL) { 1327 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw, 1328 tag); 1329 ret = DLT_OBJSET; 1330 } 1331 /* 1332 * We only return a DLT_NONE lock when it's the top-most indirect block 1333 * of the meta-dnode of the MOS. 1334 */ 1335 return (ret); 1336 } 1337 1338 /* 1339 * We need to pass the lock type in because it's possible that the block will 1340 * move from being the topmost indirect block in a dnode (and thus, have no 1341 * parent) to not the top-most via an indirection increase. This would cause a 1342 * panic if we didn't pass the lock type in. 1343 */ 1344 void 1345 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag) 1346 { 1347 if (type == DLT_PARENT) 1348 rw_exit(&db->db_parent->db_rwlock); 1349 else if (type == DLT_OBJSET) 1350 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag); 1351 } 1352 1353 static void 1354 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 1355 arc_buf_t *buf, void *vdb) 1356 { 1357 (void) zb, (void) bp; 1358 dmu_buf_impl_t *db = vdb; 1359 1360 mutex_enter(&db->db_mtx); 1361 ASSERT3U(db->db_state, ==, DB_READ); 1362 /* 1363 * All reads are synchronous, so we must have a hold on the dbuf 1364 */ 1365 ASSERT(zfs_refcount_count(&db->db_holds) > 0); 1366 ASSERT(db->db_buf == NULL); 1367 ASSERT(db->db.db_data == NULL); 1368 if (buf == NULL) { 1369 /* i/o error */ 1370 ASSERT(zio == NULL || zio->io_error != 0); 1371 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1372 ASSERT3P(db->db_buf, ==, NULL); 1373 db->db_state = DB_UNCACHED; 1374 DTRACE_SET_STATE(db, "i/o error"); 1375 } else if (db->db_level == 0 && db->db_freed_in_flight) { 1376 /* freed in flight */ 1377 ASSERT(zio == NULL || zio->io_error == 0); 1378 arc_release(buf, db); 1379 memset(buf->b_data, 0, db->db.db_size); 1380 arc_buf_freeze(buf); 1381 db->db_freed_in_flight = FALSE; 1382 dbuf_set_data(db, buf); 1383 db->db_state = DB_CACHED; 1384 DTRACE_SET_STATE(db, "freed in flight"); 1385 } else { 1386 /* success */ 1387 ASSERT(zio == NULL || zio->io_error == 0); 1388 dbuf_set_data(db, buf); 1389 db->db_state = DB_CACHED; 1390 DTRACE_SET_STATE(db, "successful read"); 1391 } 1392 cv_broadcast(&db->db_changed); 1393 dbuf_rele_and_unlock(db, NULL, B_FALSE); 1394 } 1395 1396 /* 1397 * Shortcut for performing reads on bonus dbufs. 
Returns 1398 * an error if we fail to verify the dnode associated with 1399 * a decrypted block. Otherwise success. 1400 */ 1401 static int 1402 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags) 1403 { 1404 int bonuslen, max_bonuslen, err; 1405 1406 err = dbuf_read_verify_dnode_crypt(db, flags); 1407 if (err) 1408 return (err); 1409 1410 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 1411 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1412 ASSERT(MUTEX_HELD(&db->db_mtx)); 1413 ASSERT(DB_DNODE_HELD(db)); 1414 ASSERT3U(bonuslen, <=, db->db.db_size); 1415 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP); 1416 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 1417 if (bonuslen < max_bonuslen) 1418 memset(db->db.db_data, 0, max_bonuslen); 1419 if (bonuslen) 1420 memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen); 1421 db->db_state = DB_CACHED; 1422 DTRACE_SET_STATE(db, "bonus buffer filled"); 1423 return (0); 1424 } 1425 1426 static void 1427 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn) 1428 { 1429 blkptr_t *bps = db->db.db_data; 1430 uint32_t indbs = 1ULL << dn->dn_indblkshift; 1431 int n_bps = indbs >> SPA_BLKPTRSHIFT; 1432 1433 for (int i = 0; i < n_bps; i++) { 1434 blkptr_t *bp = &bps[i]; 1435 1436 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs); 1437 BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ? 1438 dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr)); 1439 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 1440 BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1); 1441 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 1442 } 1443 } 1444 1445 /* 1446 * Handle reads on dbufs that are holes, if necessary. This function 1447 * requires that the dbuf's mutex is held. Returns success (0) if action 1448 * was taken, ENOENT if no action was taken. 1449 */ 1450 static int 1451 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn) 1452 { 1453 ASSERT(MUTEX_HELD(&db->db_mtx)); 1454 1455 int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr); 1456 /* 1457 * For level 0 blocks only, if the above check fails: 1458 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 1459 * processes the delete record and clears the bp while we are waiting 1460 * for the dn_mtx (resulting in a "no" from block_freed). 1461 */ 1462 if (!is_hole && db->db_level == 0) { 1463 is_hole = dnode_block_freed(dn, db->db_blkid) || 1464 BP_IS_HOLE(db->db_blkptr); 1465 } 1466 1467 if (is_hole) { 1468 dbuf_set_data(db, dbuf_alloc_arcbuf(db)); 1469 memset(db->db.db_data, 0, db->db.db_size); 1470 1471 if (db->db_blkptr != NULL && db->db_level > 0 && 1472 BP_IS_HOLE(db->db_blkptr) && 1473 db->db_blkptr->blk_birth != 0) { 1474 dbuf_handle_indirect_hole(db, dn); 1475 } 1476 db->db_state = DB_CACHED; 1477 DTRACE_SET_STATE(db, "hole read satisfied"); 1478 return (0); 1479 } 1480 return (ENOENT); 1481 } 1482 1483 /* 1484 * This function ensures that, when doing a decrypting read of a block, 1485 * we make sure we have decrypted the dnode associated with it. We must do 1486 * this so that we ensure we are fully authenticating the checksum-of-MACs 1487 * tree from the root of the objset down to this block. Indirect blocks are 1488 * always verified against their secure checksum-of-MACs assuming that the 1489 * dnode containing them is correct. Now that we are doing a decrypting read, 1490 * we can be sure that the key is loaded and verify that assumption. 
This is 1491 * especially important considering that we always read encrypted dnode 1492 * blocks as raw data (without verifying their MACs) to start, and 1493 * decrypt / authenticate them when we need to read an encrypted bonus buffer. 1494 */ 1495 static int 1496 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags) 1497 { 1498 int err = 0; 1499 objset_t *os = db->db_objset; 1500 arc_buf_t *dnode_abuf; 1501 dnode_t *dn; 1502 zbookmark_phys_t zb; 1503 1504 ASSERT(MUTEX_HELD(&db->db_mtx)); 1505 1506 if (!os->os_encrypted || os->os_raw_receive || 1507 (flags & DB_RF_NO_DECRYPT) != 0) 1508 return (0); 1509 1510 DB_DNODE_ENTER(db); 1511 dn = DB_DNODE(db); 1512 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL; 1513 1514 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) { 1515 DB_DNODE_EXIT(db); 1516 return (0); 1517 } 1518 1519 SET_BOOKMARK(&zb, dmu_objset_id(os), 1520 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid); 1521 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE); 1522 1523 /* 1524 * An error code of EACCES tells us that the key is still not 1525 * available. This is ok if we are only reading authenticated 1526 * (and therefore non-encrypted) blocks. 1527 */ 1528 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID && 1529 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) || 1530 (db->db_blkid == DMU_BONUS_BLKID && 1531 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)))) 1532 err = 0; 1533 1534 DB_DNODE_EXIT(db); 1535 1536 return (err); 1537 } 1538 1539 /* 1540 * Drops db_mtx and the parent lock specified by dblt and tag before 1541 * returning. 1542 */ 1543 static int 1544 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags, 1545 db_lock_type_t dblt, const void *tag) 1546 { 1547 dnode_t *dn; 1548 zbookmark_phys_t zb; 1549 uint32_t aflags = ARC_FLAG_NOWAIT; 1550 int err, zio_flags; 1551 1552 DB_DNODE_ENTER(db); 1553 dn = DB_DNODE(db); 1554 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1555 ASSERT(MUTEX_HELD(&db->db_mtx)); 1556 ASSERT(db->db_state == DB_UNCACHED); 1557 ASSERT(db->db_buf == NULL); 1558 ASSERT(db->db_parent == NULL || 1559 RW_LOCK_HELD(&db->db_parent->db_rwlock)); 1560 1561 if (db->db_blkid == DMU_BONUS_BLKID) { 1562 err = dbuf_read_bonus(db, dn, flags); 1563 goto early_unlock; 1564 } 1565 1566 err = dbuf_read_hole(db, dn); 1567 if (err == 0) 1568 goto early_unlock; 1569 1570 /* 1571 * Any attempt to read a redacted block should result in an error. This 1572 * will never happen under normal conditions, but can be useful for 1573 * debugging purposes. 1574 */ 1575 if (BP_IS_REDACTED(db->db_blkptr)) { 1576 ASSERT(dsl_dataset_feature_is_active( 1577 db->db_objset->os_dsl_dataset, 1578 SPA_FEATURE_REDACTED_DATASETS)); 1579 err = SET_ERROR(EIO); 1580 goto early_unlock; 1581 } 1582 1583 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1584 db->db.db_object, db->db_level, db->db_blkid); 1585 1586 /* 1587 * All bps of an encrypted os should have the encryption bit set. 1588 * If this is not true it indicates tampering and we report an error. 
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0)
		goto early_unlock;

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	DTRACE_SET_STATE(db, "read issued");
	mutex_exit(&db->db_mtx);

	if (dbuf_is_l2cacheable(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we need to
	 * do this now so that we can release the parent's rwlock. We have to
	 * do that now so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	blkptr_t bp = *db->db_blkptr;
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, &bp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
early_unlock:
	DB_DNODE_EXIT(db);
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it is referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
1672 */ 1673 ASSERT3U(dr->dr_txg, >=, txg - 2); 1674 if (db->db_blkid == DMU_BONUS_BLKID) { 1675 dnode_t *dn = DB_DNODE(db); 1676 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1677 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP); 1678 arc_space_consume(bonuslen, ARC_SPACE_BONUS); 1679 memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen); 1680 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) { 1681 dnode_t *dn = DB_DNODE(db); 1682 int size = arc_buf_size(db->db_buf); 1683 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1684 spa_t *spa = db->db_objset->os_spa; 1685 enum zio_compress compress_type = 1686 arc_get_compression(db->db_buf); 1687 uint8_t complevel = arc_get_complevel(db->db_buf); 1688 1689 if (arc_is_encrypted(db->db_buf)) { 1690 boolean_t byteorder; 1691 uint8_t salt[ZIO_DATA_SALT_LEN]; 1692 uint8_t iv[ZIO_DATA_IV_LEN]; 1693 uint8_t mac[ZIO_DATA_MAC_LEN]; 1694 1695 arc_get_raw_params(db->db_buf, &byteorder, salt, 1696 iv, mac); 1697 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db, 1698 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, 1699 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf), 1700 compress_type, complevel); 1701 } else if (compress_type != ZIO_COMPRESS_OFF) { 1702 ASSERT3U(type, ==, ARC_BUFC_DATA); 1703 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, 1704 size, arc_buf_lsize(db->db_buf), compress_type, 1705 complevel); 1706 } else { 1707 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); 1708 } 1709 memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size); 1710 } else { 1711 db->db_buf = NULL; 1712 dbuf_clear_data(db); 1713 } 1714 } 1715 1716 int 1717 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1718 { 1719 int err = 0; 1720 boolean_t prefetch; 1721 dnode_t *dn; 1722 1723 /* 1724 * We don't have to hold the mutex to check db_state because it 1725 * can't be freed while we have a hold on the buffer. 1726 */ 1727 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1728 1729 if (db->db_state == DB_NOFILL) 1730 return (SET_ERROR(EIO)); 1731 1732 DB_DNODE_ENTER(db); 1733 dn = DB_DNODE(db); 1734 1735 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1736 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL && 1737 DBUF_IS_CACHEABLE(db); 1738 1739 mutex_enter(&db->db_mtx); 1740 if (db->db_state == DB_CACHED) { 1741 spa_t *spa = dn->dn_objset->os_spa; 1742 1743 /* 1744 * Ensure that this block's dnode has been decrypted if 1745 * the caller has requested decrypted data. 1746 */ 1747 err = dbuf_read_verify_dnode_crypt(db, flags); 1748 1749 /* 1750 * If the arc buf is compressed or encrypted and the caller 1751 * requested uncompressed data, we need to untransform it 1752 * before returning. We also call arc_untransform() on any 1753 * unauthenticated blocks, which will verify their MAC if 1754 * the key is now available. 
1755 */ 1756 if (err == 0 && db->db_buf != NULL && 1757 (flags & DB_RF_NO_DECRYPT) == 0 && 1758 (arc_is_encrypted(db->db_buf) || 1759 arc_is_unauthenticated(db->db_buf) || 1760 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { 1761 zbookmark_phys_t zb; 1762 1763 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1764 db->db.db_object, db->db_level, db->db_blkid); 1765 dbuf_fix_old_data(db, spa_syncing_txg(spa)); 1766 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); 1767 dbuf_set_data(db, db->db_buf); 1768 } 1769 mutex_exit(&db->db_mtx); 1770 if (err == 0 && prefetch) { 1771 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1772 B_FALSE, flags & DB_RF_HAVESTRUCT); 1773 } 1774 DB_DNODE_EXIT(db); 1775 DBUF_STAT_BUMP(hash_hits); 1776 } else if (db->db_state == DB_UNCACHED) { 1777 spa_t *spa = dn->dn_objset->os_spa; 1778 boolean_t need_wait = B_FALSE; 1779 1780 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 1781 1782 if (zio == NULL && 1783 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1784 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1785 need_wait = B_TRUE; 1786 } 1787 err = dbuf_read_impl(db, zio, flags, dblt, FTAG); 1788 /* 1789 * dbuf_read_impl has dropped db_mtx and our parent's rwlock 1790 * for us 1791 */ 1792 if (!err && prefetch) { 1793 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1794 db->db_state != DB_CACHED, 1795 flags & DB_RF_HAVESTRUCT); 1796 } 1797 1798 DB_DNODE_EXIT(db); 1799 DBUF_STAT_BUMP(hash_misses); 1800 1801 /* 1802 * If we created a zio_root we must execute it to avoid 1803 * leaking it, even if it isn't attached to any work due 1804 * to an error in dbuf_read_impl(). 1805 */ 1806 if (need_wait) { 1807 if (err == 0) 1808 err = zio_wait(zio); 1809 else 1810 VERIFY0(zio_wait(zio)); 1811 } 1812 } else { 1813 /* 1814 * Another reader came in while the dbuf was in flight 1815 * between UNCACHED and CACHED. Either a writer will finish 1816 * writing the buffer (sending the dbuf to CACHED) or the 1817 * first reader's request will reach the read_done callback 1818 * and send the dbuf to CACHED. Otherwise, a failure 1819 * occurred and the dbuf went to UNCACHED. 1820 */ 1821 mutex_exit(&db->db_mtx); 1822 if (prefetch) { 1823 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1824 B_TRUE, flags & DB_RF_HAVESTRUCT); 1825 } 1826 DB_DNODE_EXIT(db); 1827 DBUF_STAT_BUMP(hash_misses); 1828 1829 /* Skip the wait per the caller's request. 
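 *
 * As a hedged illustration (not an actual call site in this file), a
 * caller that must not block on an in-flight read could pass something
 * like
 *
 *	(void) dbuf_read(db, NULL,
 *	    DB_RF_CANFAIL | DB_RF_NOPREFETCH | DB_RF_NEVERWAIT);
 *
 * and would return from here without waiting for the DB_READ/DB_FILL
 * state to settle.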
*/ 1830 if ((flags & DB_RF_NEVERWAIT) == 0) { 1831 mutex_enter(&db->db_mtx); 1832 while (db->db_state == DB_READ || 1833 db->db_state == DB_FILL) { 1834 ASSERT(db->db_state == DB_READ || 1835 (flags & DB_RF_HAVESTRUCT) == 0); 1836 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1837 db, zio_t *, zio); 1838 cv_wait(&db->db_changed, &db->db_mtx); 1839 } 1840 if (db->db_state == DB_UNCACHED) 1841 err = SET_ERROR(EIO); 1842 mutex_exit(&db->db_mtx); 1843 } 1844 } 1845 1846 return (err); 1847 } 1848 1849 static void 1850 dbuf_noread(dmu_buf_impl_t *db) 1851 { 1852 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1853 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1854 mutex_enter(&db->db_mtx); 1855 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1856 cv_wait(&db->db_changed, &db->db_mtx); 1857 if (db->db_state == DB_UNCACHED) { 1858 ASSERT(db->db_buf == NULL); 1859 ASSERT(db->db.db_data == NULL); 1860 dbuf_set_data(db, dbuf_alloc_arcbuf(db)); 1861 db->db_state = DB_FILL; 1862 DTRACE_SET_STATE(db, "assigning filled buffer"); 1863 } else if (db->db_state == DB_NOFILL) { 1864 dbuf_clear_data(db); 1865 } else { 1866 ASSERT3U(db->db_state, ==, DB_CACHED); 1867 } 1868 mutex_exit(&db->db_mtx); 1869 } 1870 1871 void 1872 dbuf_unoverride(dbuf_dirty_record_t *dr) 1873 { 1874 dmu_buf_impl_t *db = dr->dr_dbuf; 1875 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1876 uint64_t txg = dr->dr_txg; 1877 1878 ASSERT(MUTEX_HELD(&db->db_mtx)); 1879 /* 1880 * This assert is valid because dmu_sync() expects to be called by 1881 * a zilog's get_data while holding a range lock. This call only 1882 * comes from dbuf_dirty() callers who must also hold a range lock. 1883 */ 1884 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1885 ASSERT(db->db_level == 0); 1886 1887 if (db->db_blkid == DMU_BONUS_BLKID || 1888 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1889 return; 1890 1891 ASSERT(db->db_data_pending != dr); 1892 1893 /* free this block */ 1894 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 1895 zio_free(db->db_objset->os_spa, txg, bp); 1896 1897 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1898 dr->dt.dl.dr_nopwrite = B_FALSE; 1899 dr->dt.dl.dr_has_raw_params = B_FALSE; 1900 1901 /* 1902 * Release the already-written buffer, so we leave it in 1903 * a consistent dirty state. Note that all callers are 1904 * modifying the buffer, so they will immediately do 1905 * another (redundant) arc_release(). Therefore, leave 1906 * the buf thawed to save the effort of freezing & 1907 * immediately re-thawing it. 1908 */ 1909 arc_release(dr->dt.dl.dr_data, db); 1910 } 1911 1912 /* 1913 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 1914 * data blocks in the free range, so that any future readers will find 1915 * empty blocks.
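 *
 * A hedged sketch of the expected call path (for illustration only):
 * this is normally reached from dnode_free_range() while a range of a
 * file is being freed, roughly
 *
 *	dnode_free_range(dn, off, len, tx);
 *		-> dbuf_free_range(dn, first_l0_blkid, last_l0_blkid, tx);
 *
 * where first_l0_blkid/last_l0_blkid are hypothetical names for the
 * level-0 block ids covering the freed offset range.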
1916 */ 1917 void 1918 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 1919 dmu_tx_t *tx) 1920 { 1921 dmu_buf_impl_t *db_search; 1922 dmu_buf_impl_t *db, *db_next; 1923 uint64_t txg = tx->tx_txg; 1924 avl_index_t where; 1925 dbuf_dirty_record_t *dr; 1926 1927 if (end_blkid > dn->dn_maxblkid && 1928 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 1929 end_blkid = dn->dn_maxblkid; 1930 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid, 1931 (u_longlong_t)end_blkid); 1932 1933 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); 1934 db_search->db_level = 0; 1935 db_search->db_blkid = start_blkid; 1936 db_search->db_state = DB_SEARCH; 1937 1938 mutex_enter(&dn->dn_dbufs_mtx); 1939 db = avl_find(&dn->dn_dbufs, db_search, &where); 1940 ASSERT3P(db, ==, NULL); 1941 1942 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 1943 1944 for (; db != NULL; db = db_next) { 1945 db_next = AVL_NEXT(&dn->dn_dbufs, db); 1946 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1947 1948 if (db->db_level != 0 || db->db_blkid > end_blkid) { 1949 break; 1950 } 1951 ASSERT3U(db->db_blkid, >=, start_blkid); 1952 1953 /* found a level 0 buffer in the range */ 1954 mutex_enter(&db->db_mtx); 1955 if (dbuf_undirty(db, tx)) { 1956 /* mutex has been dropped and dbuf destroyed */ 1957 continue; 1958 } 1959 1960 if (db->db_state == DB_UNCACHED || 1961 db->db_state == DB_NOFILL || 1962 db->db_state == DB_EVICTING) { 1963 ASSERT(db->db.db_data == NULL); 1964 mutex_exit(&db->db_mtx); 1965 continue; 1966 } 1967 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1968 /* will be handled in dbuf_read_done or dbuf_rele */ 1969 db->db_freed_in_flight = TRUE; 1970 mutex_exit(&db->db_mtx); 1971 continue; 1972 } 1973 if (zfs_refcount_count(&db->db_holds) == 0) { 1974 ASSERT(db->db_buf); 1975 dbuf_destroy(db); 1976 continue; 1977 } 1978 /* The dbuf is referenced */ 1979 1980 dr = list_head(&db->db_dirty_records); 1981 if (dr != NULL) { 1982 if (dr->dr_txg == txg) { 1983 /* 1984 * This buffer is "in-use"; re-adjust the file 1985 * size to reflect that this buffer may 1986 * contain new data when we sync. 1987 */ 1988 if (db->db_blkid != DMU_SPILL_BLKID && 1989 db->db_blkid > dn->dn_maxblkid) 1990 dn->dn_maxblkid = db->db_blkid; 1991 dbuf_unoverride(dr); 1992 } else { 1993 /* 1994 * This dbuf is not dirty in the open context. 1995 * Either uncache it (if it's not referenced in 1996 * the open context) or reset its contents to 1997 * empty.
1998 */ 1999 dbuf_fix_old_data(db, txg); 2000 } 2001 } 2002 /* clear the contents if its cached */ 2003 if (db->db_state == DB_CACHED) { 2004 ASSERT(db->db.db_data != NULL); 2005 arc_release(db->db_buf, db); 2006 rw_enter(&db->db_rwlock, RW_WRITER); 2007 memset(db->db.db_data, 0, db->db.db_size); 2008 rw_exit(&db->db_rwlock); 2009 arc_buf_freeze(db->db_buf); 2010 } 2011 2012 mutex_exit(&db->db_mtx); 2013 } 2014 2015 mutex_exit(&dn->dn_dbufs_mtx); 2016 kmem_free(db_search, sizeof (dmu_buf_impl_t)); 2017 } 2018 2019 void 2020 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 2021 { 2022 arc_buf_t *buf, *old_buf; 2023 dbuf_dirty_record_t *dr; 2024 int osize = db->db.db_size; 2025 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2026 dnode_t *dn; 2027 2028 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2029 2030 DB_DNODE_ENTER(db); 2031 dn = DB_DNODE(db); 2032 2033 /* 2034 * XXX we should be doing a dbuf_read, checking the return 2035 * value and returning that up to our callers 2036 */ 2037 dmu_buf_will_dirty(&db->db, tx); 2038 2039 /* create the data buffer for the new block */ 2040 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 2041 2042 /* copy old block data to the new block */ 2043 old_buf = db->db_buf; 2044 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size)); 2045 /* zero the remainder */ 2046 if (size > osize) 2047 memset((uint8_t *)buf->b_data + osize, 0, size - osize); 2048 2049 mutex_enter(&db->db_mtx); 2050 dbuf_set_data(db, buf); 2051 arc_buf_destroy(old_buf, db); 2052 db->db.db_size = size; 2053 2054 dr = list_head(&db->db_dirty_records); 2055 /* dirty record added by dmu_buf_will_dirty() */ 2056 VERIFY(dr != NULL); 2057 if (db->db_level == 0) 2058 dr->dt.dl.dr_data = buf; 2059 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2060 ASSERT3U(dr->dr_accounted, ==, osize); 2061 dr->dr_accounted = size; 2062 mutex_exit(&db->db_mtx); 2063 2064 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 2065 DB_DNODE_EXIT(db); 2066 } 2067 2068 void 2069 dbuf_release_bp(dmu_buf_impl_t *db) 2070 { 2071 objset_t *os __maybe_unused = db->db_objset; 2072 2073 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 2074 ASSERT(arc_released(os->os_phys_buf) || 2075 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 2076 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 2077 2078 (void) arc_release(db->db_buf, db); 2079 } 2080 2081 /* 2082 * We already have a dirty record for this TXG, and we are being 2083 * dirtied again. 2084 */ 2085 static void 2086 dbuf_redirty(dbuf_dirty_record_t *dr) 2087 { 2088 dmu_buf_impl_t *db = dr->dr_dbuf; 2089 2090 ASSERT(MUTEX_HELD(&db->db_mtx)); 2091 2092 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 2093 /* 2094 * If this buffer has already been written out, 2095 * we now need to reset its state. 2096 */ 2097 dbuf_unoverride(dr); 2098 if (db->db.db_object != DMU_META_DNODE_OBJECT && 2099 db->db_state != DB_NOFILL) { 2100 /* Already released on initial dirty, so just thaw. 
*/ 2101 ASSERT(arc_released(db->db_buf)); 2102 arc_buf_thaw(db->db_buf); 2103 } 2104 } 2105 } 2106 2107 dbuf_dirty_record_t * 2108 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) 2109 { 2110 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2111 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid); 2112 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE); 2113 ASSERT(dn->dn_maxblkid >= blkid); 2114 2115 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP); 2116 list_link_init(&dr->dr_dirty_node); 2117 list_link_init(&dr->dr_dbuf_node); 2118 dr->dr_dnode = dn; 2119 dr->dr_txg = tx->tx_txg; 2120 dr->dt.dll.dr_blkid = blkid; 2121 dr->dr_accounted = dn->dn_datablksz; 2122 2123 /* 2124 * There should not be any dbuf for the block that we're dirtying. 2125 * Otherwise the buffer contents could be inconsistent between the 2126 * dbuf and the lightweight dirty record. 2127 */ 2128 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid)); 2129 2130 mutex_enter(&dn->dn_mtx); 2131 int txgoff = tx->tx_txg & TXG_MASK; 2132 if (dn->dn_free_ranges[txgoff] != NULL) { 2133 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1); 2134 } 2135 2136 if (dn->dn_nlevels == 1) { 2137 ASSERT3U(blkid, <, dn->dn_nblkptr); 2138 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2139 mutex_exit(&dn->dn_mtx); 2140 rw_exit(&dn->dn_struct_rwlock); 2141 dnode_setdirty(dn, tx); 2142 } else { 2143 mutex_exit(&dn->dn_mtx); 2144 2145 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2146 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn, 2147 1, blkid >> epbs, FTAG); 2148 rw_exit(&dn->dn_struct_rwlock); 2149 if (parent_db == NULL) { 2150 kmem_free(dr, sizeof (*dr)); 2151 return (NULL); 2152 } 2153 int err = dbuf_read(parent_db, NULL, 2154 (DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2155 if (err != 0) { 2156 dbuf_rele(parent_db, FTAG); 2157 kmem_free(dr, sizeof (*dr)); 2158 return (NULL); 2159 } 2160 2161 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx); 2162 dbuf_rele(parent_db, FTAG); 2163 mutex_enter(&parent_dr->dt.di.dr_mtx); 2164 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg); 2165 list_insert_tail(&parent_dr->dt.di.dr_children, dr); 2166 mutex_exit(&parent_dr->dt.di.dr_mtx); 2167 dr->dr_parent = parent_dr; 2168 } 2169 2170 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx); 2171 2172 return (dr); 2173 } 2174 2175 dbuf_dirty_record_t * 2176 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2177 { 2178 dnode_t *dn; 2179 objset_t *os; 2180 dbuf_dirty_record_t *dr, *dr_next, *dr_head; 2181 int txgoff = tx->tx_txg & TXG_MASK; 2182 boolean_t drop_struct_rwlock = B_FALSE; 2183 2184 ASSERT(tx->tx_txg != 0); 2185 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2186 DMU_TX_DIRTY_BUF(tx, db); 2187 2188 DB_DNODE_ENTER(db); 2189 dn = DB_DNODE(db); 2190 /* 2191 * Shouldn't dirty a regular buffer in syncing context. Private 2192 * objects may be dirtied in syncing context, but only if they 2193 * were already pre-dirtied in open context. 
2194 */ 2195 #ifdef ZFS_DEBUG 2196 if (dn->dn_objset->os_dsl_dataset != NULL) { 2197 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 2198 RW_READER, FTAG); 2199 } 2200 ASSERT(!dmu_tx_is_syncing(tx) || 2201 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 2202 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2203 dn->dn_objset->os_dsl_dataset == NULL); 2204 if (dn->dn_objset->os_dsl_dataset != NULL) 2205 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 2206 #endif 2207 /* 2208 * We make this assert for private objects as well, but after we 2209 * check if we're already dirty. They are allowed to re-dirty 2210 * in syncing context. 2211 */ 2212 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2213 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2214 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2215 2216 mutex_enter(&db->db_mtx); 2217 /* 2218 * XXX make this true for indirects too? The problem is that 2219 * transactions created with dmu_tx_create_assigned() from 2220 * syncing context don't bother holding ahead. 2221 */ 2222 ASSERT(db->db_level != 0 || 2223 db->db_state == DB_CACHED || db->db_state == DB_FILL || 2224 db->db_state == DB_NOFILL); 2225 2226 mutex_enter(&dn->dn_mtx); 2227 dnode_set_dirtyctx(dn, tx, db); 2228 if (tx->tx_txg > dn->dn_dirty_txg) 2229 dn->dn_dirty_txg = tx->tx_txg; 2230 mutex_exit(&dn->dn_mtx); 2231 2232 if (db->db_blkid == DMU_SPILL_BLKID) 2233 dn->dn_have_spill = B_TRUE; 2234 2235 /* 2236 * If this buffer is already dirty, we're done. 2237 */ 2238 dr_head = list_head(&db->db_dirty_records); 2239 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg || 2240 db->db.db_object == DMU_META_DNODE_OBJECT); 2241 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg); 2242 if (dr_next && dr_next->dr_txg == tx->tx_txg) { 2243 DB_DNODE_EXIT(db); 2244 2245 dbuf_redirty(dr_next); 2246 mutex_exit(&db->db_mtx); 2247 return (dr_next); 2248 } 2249 2250 /* 2251 * Only valid if not already dirty. 2252 */ 2253 ASSERT(dn->dn_object == 0 || 2254 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2255 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2256 2257 ASSERT3U(dn->dn_nlevels, >, db->db_level); 2258 2259 /* 2260 * We should only be dirtying in syncing context if it's the 2261 * mos or we're initializing the os or it's a special object. 2262 * However, we are allowed to dirty in syncing context provided 2263 * we already dirtied it in open context. Hence we must make 2264 * this assertion only if we're not already dirty. 2265 */ 2266 os = dn->dn_objset; 2267 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 2268 #ifdef ZFS_DEBUG 2269 if (dn->dn_objset->os_dsl_dataset != NULL) 2270 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 2271 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2272 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 2273 if (dn->dn_objset->os_dsl_dataset != NULL) 2274 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 2275 #endif 2276 ASSERT(db->db.db_size != 0); 2277 2278 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2279 2280 if (db->db_blkid != DMU_BONUS_BLKID) { 2281 dmu_objset_willuse_space(os, db->db.db_size, tx); 2282 } 2283 2284 /* 2285 * If this buffer is dirty in an old transaction group we need 2286 * to make a copy of it so that the changes we make in this 2287 * transaction group won't leak out when we sync the older txg. 
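 *
 * A short worked example (illustrative): suppose this dbuf was dirtied
 * in txg 10, txg 10 is still syncing, and we are now dirtying it again
 * in txg 11. The txg-10 dirty record may still reference db_buf, so for
 * level-0 buffers the code below calls dbuf_fix_old_data() to give that
 * record its own copy of the data; the changes made in txg 11 then land
 * only in db_buf.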
2288 */ 2289 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 2290 list_link_init(&dr->dr_dirty_node); 2291 list_link_init(&dr->dr_dbuf_node); 2292 dr->dr_dnode = dn; 2293 if (db->db_level == 0) { 2294 void *data_old = db->db_buf; 2295 2296 if (db->db_state != DB_NOFILL) { 2297 if (db->db_blkid == DMU_BONUS_BLKID) { 2298 dbuf_fix_old_data(db, tx->tx_txg); 2299 data_old = db->db.db_data; 2300 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 2301 /* 2302 * Release the data buffer from the cache so 2303 * that we can modify it without impacting 2304 * possible other users of this cached data 2305 * block. Note that indirect blocks and 2306 * private objects are not released until the 2307 * syncing state (since they are only modified 2308 * then). 2309 */ 2310 arc_release(db->db_buf, db); 2311 dbuf_fix_old_data(db, tx->tx_txg); 2312 data_old = db->db_buf; 2313 } 2314 ASSERT(data_old != NULL); 2315 } 2316 dr->dt.dl.dr_data = data_old; 2317 } else { 2318 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); 2319 list_create(&dr->dt.di.dr_children, 2320 sizeof (dbuf_dirty_record_t), 2321 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 2322 } 2323 if (db->db_blkid != DMU_BONUS_BLKID) 2324 dr->dr_accounted = db->db.db_size; 2325 dr->dr_dbuf = db; 2326 dr->dr_txg = tx->tx_txg; 2327 list_insert_before(&db->db_dirty_records, dr_next, dr); 2328 2329 /* 2330 * We could have been freed_in_flight between the dbuf_noread 2331 * and dbuf_dirty. We win, as though the dbuf_noread() had 2332 * happened after the free. 2333 */ 2334 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2335 db->db_blkid != DMU_SPILL_BLKID) { 2336 mutex_enter(&dn->dn_mtx); 2337 if (dn->dn_free_ranges[txgoff] != NULL) { 2338 range_tree_clear(dn->dn_free_ranges[txgoff], 2339 db->db_blkid, 1); 2340 } 2341 mutex_exit(&dn->dn_mtx); 2342 db->db_freed_in_flight = FALSE; 2343 } 2344 2345 /* 2346 * This buffer is now part of this txg 2347 */ 2348 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 2349 db->db_dirtycnt += 1; 2350 ASSERT3U(db->db_dirtycnt, <=, 3); 2351 2352 mutex_exit(&db->db_mtx); 2353 2354 if (db->db_blkid == DMU_BONUS_BLKID || 2355 db->db_blkid == DMU_SPILL_BLKID) { 2356 mutex_enter(&dn->dn_mtx); 2357 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2358 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2359 mutex_exit(&dn->dn_mtx); 2360 dnode_setdirty(dn, tx); 2361 DB_DNODE_EXIT(db); 2362 return (dr); 2363 } 2364 2365 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 2366 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2367 drop_struct_rwlock = B_TRUE; 2368 } 2369 2370 /* 2371 * If we are overwriting a dedup BP, then unless it is snapshotted, 2372 * when we get to syncing context we will need to decrement its 2373 * refcount in the DDT. Prefetch the relevant DDT block so that 2374 * syncing context won't have to wait for the i/o. 2375 */ 2376 if (db->db_blkptr != NULL) { 2377 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 2378 ddt_prefetch(os->os_spa, db->db_blkptr); 2379 dmu_buf_unlock_parent(db, dblt, FTAG); 2380 } 2381 2382 /* 2383 * We need to hold the dn_struct_rwlock to make this assertion, 2384 * because it protects dn_phys / dn_next_nlevels from changing. 
2385 */ 2386 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 2387 dn->dn_phys->dn_nlevels > db->db_level || 2388 dn->dn_next_nlevels[txgoff] > db->db_level || 2389 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 2390 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 2391 2392 2393 if (db->db_level == 0) { 2394 ASSERT(!db->db_objset->os_raw_receive || 2395 dn->dn_maxblkid >= db->db_blkid); 2396 dnode_new_blkid(dn, db->db_blkid, tx, 2397 drop_struct_rwlock, B_FALSE); 2398 ASSERT(dn->dn_maxblkid >= db->db_blkid); 2399 } 2400 2401 if (db->db_level+1 < dn->dn_nlevels) { 2402 dmu_buf_impl_t *parent = db->db_parent; 2403 dbuf_dirty_record_t *di; 2404 int parent_held = FALSE; 2405 2406 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 2407 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2408 parent = dbuf_hold_level(dn, db->db_level + 1, 2409 db->db_blkid >> epbs, FTAG); 2410 ASSERT(parent != NULL); 2411 parent_held = TRUE; 2412 } 2413 if (drop_struct_rwlock) 2414 rw_exit(&dn->dn_struct_rwlock); 2415 ASSERT3U(db->db_level + 1, ==, parent->db_level); 2416 di = dbuf_dirty(parent, tx); 2417 if (parent_held) 2418 dbuf_rele(parent, FTAG); 2419 2420 mutex_enter(&db->db_mtx); 2421 /* 2422 * Since we've dropped the mutex, it's possible that 2423 * dbuf_undirty() might have changed this out from under us. 2424 */ 2425 if (list_head(&db->db_dirty_records) == dr || 2426 dn->dn_object == DMU_META_DNODE_OBJECT) { 2427 mutex_enter(&di->dt.di.dr_mtx); 2428 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 2429 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2430 list_insert_tail(&di->dt.di.dr_children, dr); 2431 mutex_exit(&di->dt.di.dr_mtx); 2432 dr->dr_parent = di; 2433 } 2434 mutex_exit(&db->db_mtx); 2435 } else { 2436 ASSERT(db->db_level + 1 == dn->dn_nlevels); 2437 ASSERT(db->db_blkid < dn->dn_nblkptr); 2438 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 2439 mutex_enter(&dn->dn_mtx); 2440 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2441 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2442 mutex_exit(&dn->dn_mtx); 2443 if (drop_struct_rwlock) 2444 rw_exit(&dn->dn_struct_rwlock); 2445 } 2446 2447 dnode_setdirty(dn, tx); 2448 DB_DNODE_EXIT(db); 2449 return (dr); 2450 } 2451 2452 static void 2453 dbuf_undirty_bonus(dbuf_dirty_record_t *dr) 2454 { 2455 dmu_buf_impl_t *db = dr->dr_dbuf; 2456 2457 if (dr->dt.dl.dr_data != db->db.db_data) { 2458 struct dnode *dn = dr->dr_dnode; 2459 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 2460 2461 kmem_free(dr->dt.dl.dr_data, max_bonuslen); 2462 arc_space_return(max_bonuslen, ARC_SPACE_BONUS); 2463 } 2464 db->db_data_pending = NULL; 2465 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 2466 list_remove(&db->db_dirty_records, dr); 2467 if (dr->dr_dbuf->db_level != 0) { 2468 mutex_destroy(&dr->dt.di.dr_mtx); 2469 list_destroy(&dr->dt.di.dr_children); 2470 } 2471 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2472 ASSERT3U(db->db_dirtycnt, >, 0); 2473 db->db_dirtycnt -= 1; 2474 } 2475 2476 /* 2477 * Undirty a buffer in the transaction group referenced by the given 2478 * transaction. Return whether this evicted the dbuf. 2479 */ 2480 static boolean_t 2481 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2482 { 2483 uint64_t txg = tx->tx_txg; 2484 2485 ASSERT(txg != 0); 2486 2487 /* 2488 * Due to our use of dn_nlevels below, this can only be called 2489 * in open context, unless we are operating on the MOS. 
2490 * From syncing context, dn_nlevels may be different from the 2491 * dn_nlevels used when dbuf was dirtied. 2492 */ 2493 ASSERT(db->db_objset == 2494 dmu_objset_pool(db->db_objset)->dp_meta_objset || 2495 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 2496 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2497 ASSERT0(db->db_level); 2498 ASSERT(MUTEX_HELD(&db->db_mtx)); 2499 2500 /* 2501 * If this buffer is not dirty, we're done. 2502 */ 2503 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg); 2504 if (dr == NULL) 2505 return (B_FALSE); 2506 ASSERT(dr->dr_dbuf == db); 2507 2508 dnode_t *dn = dr->dr_dnode; 2509 2510 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2511 2512 ASSERT(db->db.db_size != 0); 2513 2514 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 2515 dr->dr_accounted, txg); 2516 2517 list_remove(&db->db_dirty_records, dr); 2518 2519 /* 2520 * Note that there are three places in dbuf_dirty() 2521 * where this dirty record may be put on a list. 2522 * Make sure to do a list_remove corresponding to 2523 * every one of those list_insert calls. 2524 */ 2525 if (dr->dr_parent) { 2526 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 2527 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 2528 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 2529 } else if (db->db_blkid == DMU_SPILL_BLKID || 2530 db->db_level + 1 == dn->dn_nlevels) { 2531 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 2532 mutex_enter(&dn->dn_mtx); 2533 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 2534 mutex_exit(&dn->dn_mtx); 2535 } 2536 2537 if (db->db_state != DB_NOFILL) { 2538 dbuf_unoverride(dr); 2539 2540 ASSERT(db->db_buf != NULL); 2541 ASSERT(dr->dt.dl.dr_data != NULL); 2542 if (dr->dt.dl.dr_data != db->db_buf) 2543 arc_buf_destroy(dr->dt.dl.dr_data, db); 2544 } 2545 2546 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2547 2548 ASSERT(db->db_dirtycnt > 0); 2549 db->db_dirtycnt -= 1; 2550 2551 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2552 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 2553 dbuf_destroy(db); 2554 return (B_TRUE); 2555 } 2556 2557 return (B_FALSE); 2558 } 2559 2560 static void 2561 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) 2562 { 2563 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2564 2565 ASSERT(tx->tx_txg != 0); 2566 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2567 2568 /* 2569 * Quick check for dirtiness. For already dirty blocks, this 2570 * reduces runtime of this function by >90%, and overall performance 2571 * by 50% for some workloads (e.g. file deletion with indirect blocks 2572 * cached). 2573 */ 2574 mutex_enter(&db->db_mtx); 2575 2576 if (db->db_state == DB_CACHED) { 2577 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2578 /* 2579 * It's possible that it is already dirty but not cached, 2580 * because there are some calls to dbuf_dirty() that don't 2581 * go through dmu_buf_will_dirty(). 2582 */ 2583 if (dr != NULL) { 2584 /* This dbuf is already dirty and cached. 
*/ 2585 dbuf_redirty(dr); 2586 mutex_exit(&db->db_mtx); 2587 return; 2588 } 2589 } 2590 mutex_exit(&db->db_mtx); 2591 2592 DB_DNODE_ENTER(db); 2593 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 2594 flags |= DB_RF_HAVESTRUCT; 2595 DB_DNODE_EXIT(db); 2596 (void) dbuf_read(db, NULL, flags); 2597 (void) dbuf_dirty(db, tx); 2598 } 2599 2600 void 2601 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2602 { 2603 dmu_buf_will_dirty_impl(db_fake, 2604 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); 2605 } 2606 2607 boolean_t 2608 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2609 { 2610 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2611 dbuf_dirty_record_t *dr; 2612 2613 mutex_enter(&db->db_mtx); 2614 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2615 mutex_exit(&db->db_mtx); 2616 return (dr != NULL); 2617 } 2618 2619 void 2620 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2621 { 2622 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2623 2624 db->db_state = DB_NOFILL; 2625 DTRACE_SET_STATE(db, "allocating NOFILL buffer"); 2626 dmu_buf_will_fill(db_fake, tx); 2627 } 2628 2629 void 2630 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2631 { 2632 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2633 2634 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2635 ASSERT(tx->tx_txg != 0); 2636 ASSERT(db->db_level == 0); 2637 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2638 2639 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2640 dmu_tx_private_ok(tx)); 2641 2642 dbuf_noread(db); 2643 (void) dbuf_dirty(db, tx); 2644 } 2645 2646 /* 2647 * This function is effectively the same as dmu_buf_will_dirty(), but 2648 * indicates the caller expects raw encrypted data in the db, and provides 2649 * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2650 * blkptr_t when this dbuf is written. This is only used for blocks of 2651 * dnodes, during raw receive. 2652 */ 2653 void 2654 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2655 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2656 { 2657 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2658 dbuf_dirty_record_t *dr; 2659 2660 /* 2661 * dr_has_raw_params is only processed for blocks of dnodes 2662 * (see dbuf_sync_dnode_leaf_crypt()). 
2663 */ 2664 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2665 ASSERT3U(db->db_level, ==, 0); 2666 ASSERT(db->db_objset->os_raw_receive); 2667 2668 dmu_buf_will_dirty_impl(db_fake, 2669 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); 2670 2671 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2672 2673 ASSERT3P(dr, !=, NULL); 2674 2675 dr->dt.dl.dr_has_raw_params = B_TRUE; 2676 dr->dt.dl.dr_byteorder = byteorder; 2677 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN); 2678 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN); 2679 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN); 2680 } 2681 2682 static void 2683 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) 2684 { 2685 struct dirty_leaf *dl; 2686 dbuf_dirty_record_t *dr; 2687 2688 dr = list_head(&db->db_dirty_records); 2689 ASSERT3P(dr, !=, NULL); 2690 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2691 dl = &dr->dt.dl; 2692 dl->dr_overridden_by = *bp; 2693 dl->dr_override_state = DR_OVERRIDDEN; 2694 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2695 } 2696 2697 void 2698 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx) 2699 { 2700 (void) tx; 2701 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2702 dbuf_states_t old_state; 2703 mutex_enter(&db->db_mtx); 2704 DBUF_VERIFY(db); 2705 2706 old_state = db->db_state; 2707 db->db_state = DB_CACHED; 2708 if (old_state == DB_FILL) { 2709 if (db->db_level == 0 && db->db_freed_in_flight) { 2710 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2711 /* we were freed while filling */ 2712 /* XXX dbuf_undirty? */ 2713 memset(db->db.db_data, 0, db->db.db_size); 2714 db->db_freed_in_flight = FALSE; 2715 DTRACE_SET_STATE(db, 2716 "fill done handling freed in flight"); 2717 } else { 2718 DTRACE_SET_STATE(db, "fill done"); 2719 } 2720 cv_broadcast(&db->db_changed); 2721 } 2722 mutex_exit(&db->db_mtx); 2723 } 2724 2725 void 2726 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 2727 bp_embedded_type_t etype, enum zio_compress comp, 2728 int uncompressed_size, int compressed_size, int byteorder, 2729 dmu_tx_t *tx) 2730 { 2731 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2732 struct dirty_leaf *dl; 2733 dmu_object_type_t type; 2734 dbuf_dirty_record_t *dr; 2735 2736 if (etype == BP_EMBEDDED_TYPE_DATA) { 2737 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2738 SPA_FEATURE_EMBEDDED_DATA)); 2739 } 2740 2741 DB_DNODE_ENTER(db); 2742 type = DB_DNODE(db)->dn_type; 2743 DB_DNODE_EXIT(db); 2744 2745 ASSERT0(db->db_level); 2746 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2747 2748 dmu_buf_will_not_fill(dbuf, tx); 2749 2750 dr = list_head(&db->db_dirty_records); 2751 ASSERT3P(dr, !=, NULL); 2752 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2753 dl = &dr->dt.dl; 2754 encode_embedded_bp_compressed(&dl->dr_overridden_by, 2755 data, comp, uncompressed_size, compressed_size); 2756 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 2757 BP_SET_TYPE(&dl->dr_overridden_by, type); 2758 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 2759 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 2760 2761 dl->dr_override_state = DR_OVERRIDDEN; 2762 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2763 } 2764 2765 void 2766 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx) 2767 { 2768 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2769 dmu_object_type_t type; 2770 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset, 2771 SPA_FEATURE_REDACTED_DATASETS)); 2772 2773 DB_DNODE_ENTER(db); 2774 type = DB_DNODE(db)->dn_type; 2775 DB_DNODE_EXIT(db); 2776 2777 ASSERT0(db->db_level); 2778 dmu_buf_will_not_fill(dbuf, tx); 2779 2780 
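	/*
	 * Construct an all-zero block pointer, stamp it with the object's
	 * type, level 0, this txg's birth time, and the dbuf's logical size,
	 * then mark it redacted; dbuf_override_impl() records it as the
	 * override BP for this dirty record, so no block data is written.
	 */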
blkptr_t bp = { { { {0} } } }; 2781 BP_SET_TYPE(&bp, type); 2782 BP_SET_LEVEL(&bp, 0); 2783 BP_SET_BIRTH(&bp, tx->tx_txg, 0); 2784 BP_SET_REDACTED(&bp); 2785 BPE_SET_LSIZE(&bp, dbuf->db_size); 2786 2787 dbuf_override_impl(db, &bp, tx); 2788 } 2789 2790 /* 2791 * Directly assign a provided arc buf to a given dbuf if it's not referenced 2792 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 2793 */ 2794 void 2795 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 2796 { 2797 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2798 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2799 ASSERT(db->db_level == 0); 2800 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 2801 ASSERT(buf != NULL); 2802 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 2803 ASSERT(tx->tx_txg != 0); 2804 2805 arc_return_buf(buf, db); 2806 ASSERT(arc_released(buf)); 2807 2808 mutex_enter(&db->db_mtx); 2809 2810 while (db->db_state == DB_READ || db->db_state == DB_FILL) 2811 cv_wait(&db->db_changed, &db->db_mtx); 2812 2813 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 2814 2815 if (db->db_state == DB_CACHED && 2816 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2817 /* 2818 * In practice, we will never have a case where we have an 2819 * encrypted arc buffer while additional holds exist on the 2820 * dbuf. We don't handle this here so we simply assert that 2821 * fact instead. 2822 */ 2823 ASSERT(!arc_is_encrypted(buf)); 2824 mutex_exit(&db->db_mtx); 2825 (void) dbuf_dirty(db, tx); 2826 memcpy(db->db.db_data, buf->b_data, db->db.db_size); 2827 arc_buf_destroy(buf, db); 2828 return; 2829 } 2830 2831 if (db->db_state == DB_CACHED) { 2832 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); 2833 2834 ASSERT(db->db_buf != NULL); 2835 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 2836 ASSERT(dr->dt.dl.dr_data == db->db_buf); 2837 2838 if (!arc_released(db->db_buf)) { 2839 ASSERT(dr->dt.dl.dr_override_state == 2840 DR_OVERRIDDEN); 2841 arc_release(db->db_buf, db); 2842 } 2843 dr->dt.dl.dr_data = buf; 2844 arc_buf_destroy(db->db_buf, db); 2845 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2846 arc_release(db->db_buf, db); 2847 arc_buf_destroy(db->db_buf, db); 2848 } 2849 db->db_buf = NULL; 2850 } 2851 ASSERT(db->db_buf == NULL); 2852 dbuf_set_data(db, buf); 2853 db->db_state = DB_FILL; 2854 DTRACE_SET_STATE(db, "filling assigned arcbuf"); 2855 mutex_exit(&db->db_mtx); 2856 (void) dbuf_dirty(db, tx); 2857 dmu_buf_fill_done(&db->db, tx); 2858 } 2859 2860 void 2861 dbuf_destroy(dmu_buf_impl_t *db) 2862 { 2863 dnode_t *dn; 2864 dmu_buf_impl_t *parent = db->db_parent; 2865 dmu_buf_impl_t *dndb; 2866 2867 ASSERT(MUTEX_HELD(&db->db_mtx)); 2868 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2869 2870 if (db->db_buf != NULL) { 2871 arc_buf_destroy(db->db_buf, db); 2872 db->db_buf = NULL; 2873 } 2874 2875 if (db->db_blkid == DMU_BONUS_BLKID) { 2876 int slots = DB_DNODE(db)->dn_num_slots; 2877 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 2878 if (db->db.db_data != NULL) { 2879 kmem_free(db->db.db_data, bonuslen); 2880 arc_space_return(bonuslen, ARC_SPACE_BONUS); 2881 db->db_state = DB_UNCACHED; 2882 DTRACE_SET_STATE(db, "buffer cleared"); 2883 } 2884 } 2885 2886 dbuf_clear_data(db); 2887 2888 if (multilist_link_active(&db->db_cache_link)) { 2889 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2890 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2891 2892 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 2893 (void) 
zfs_refcount_remove_many( 2894 &dbuf_caches[db->db_caching_status].size, 2895 db->db.db_size, db); 2896 2897 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 2898 DBUF_STAT_BUMPDOWN(metadata_cache_count); 2899 } else { 2900 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 2901 DBUF_STAT_BUMPDOWN(cache_count); 2902 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 2903 db->db.db_size); 2904 } 2905 db->db_caching_status = DB_NO_CACHE; 2906 } 2907 2908 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2909 ASSERT(db->db_data_pending == NULL); 2910 ASSERT(list_is_empty(&db->db_dirty_records)); 2911 2912 db->db_state = DB_EVICTING; 2913 DTRACE_SET_STATE(db, "buffer eviction started"); 2914 db->db_blkptr = NULL; 2915 2916 /* 2917 * Now that db_state is DB_EVICTING, nobody else can find this via 2918 * the hash table. We can now drop db_mtx, which allows us to 2919 * acquire the dn_dbufs_mtx. 2920 */ 2921 mutex_exit(&db->db_mtx); 2922 2923 DB_DNODE_ENTER(db); 2924 dn = DB_DNODE(db); 2925 dndb = dn->dn_dbuf; 2926 if (db->db_blkid != DMU_BONUS_BLKID) { 2927 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2928 if (needlock) 2929 mutex_enter_nested(&dn->dn_dbufs_mtx, 2930 NESTED_SINGLE); 2931 avl_remove(&dn->dn_dbufs, db); 2932 membar_producer(); 2933 DB_DNODE_EXIT(db); 2934 if (needlock) 2935 mutex_exit(&dn->dn_dbufs_mtx); 2936 /* 2937 * Decrementing the dbuf count means that the hold corresponding 2938 * to the removed dbuf is no longer discounted in dnode_move(), 2939 * so the dnode cannot be moved until after we release the hold. 2940 * The membar_producer() ensures visibility of the decremented 2941 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2942 * release any lock. 2943 */ 2944 mutex_enter(&dn->dn_mtx); 2945 dnode_rele_and_unlock(dn, db, B_TRUE); 2946 db->db_dnode_handle = NULL; 2947 2948 dbuf_hash_remove(db); 2949 } else { 2950 DB_DNODE_EXIT(db); 2951 } 2952 2953 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2954 2955 db->db_parent = NULL; 2956 2957 ASSERT(db->db_buf == NULL); 2958 ASSERT(db->db.db_data == NULL); 2959 ASSERT(db->db_hash_next == NULL); 2960 ASSERT(db->db_blkptr == NULL); 2961 ASSERT(db->db_data_pending == NULL); 2962 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2963 ASSERT(!multilist_link_active(&db->db_cache_link)); 2964 2965 /* 2966 * If this dbuf is referenced from an indirect dbuf, 2967 * decrement the ref count on the indirect dbuf. 2968 */ 2969 if (parent && parent != dndb) { 2970 mutex_enter(&parent->db_mtx); 2971 dbuf_rele_and_unlock(parent, db, B_TRUE); 2972 } 2973 2974 kmem_cache_free(dbuf_kmem_cache, db); 2975 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 2976 } 2977 2978 /* 2979 * Note: While bpp will always be updated if the function returns success, 2980 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2981 * this happens when the dnode is the meta-dnode, or {user|group|project}used 2982 * object. 
2983 */ 2984 __attribute__((always_inline)) 2985 static inline int 2986 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2987 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2988 { 2989 *parentp = NULL; 2990 *bpp = NULL; 2991 2992 ASSERT(blkid != DMU_BONUS_BLKID); 2993 2994 if (blkid == DMU_SPILL_BLKID) { 2995 mutex_enter(&dn->dn_mtx); 2996 if (dn->dn_have_spill && 2997 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2998 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 2999 else 3000 *bpp = NULL; 3001 dbuf_add_ref(dn->dn_dbuf, NULL); 3002 *parentp = dn->dn_dbuf; 3003 mutex_exit(&dn->dn_mtx); 3004 return (0); 3005 } 3006 3007 int nlevels = 3008 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 3009 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 3010 3011 ASSERT3U(level * epbs, <, 64); 3012 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3013 /* 3014 * This assertion shouldn't trip as long as the max indirect block size 3015 * is less than 1M. The reason for this is that up to that point, 3016 * the number of levels required to address an entire object with blocks 3017 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 3018 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 3019 * (i.e. we can address the entire object), objects will all use at most 3020 * N-1 levels and the assertion won't overflow. However, once epbs is 3021 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 3022 * enough to address an entire object, so objects will have 5 levels, 3023 * but then this assertion will overflow. 3024 * 3025 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 3026 * need to redo this logic to handle overflows. 3027 */ 3028 ASSERT(level >= nlevels || 3029 ((nlevels - level - 1) * epbs) + 3030 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 3031 if (level >= nlevels || 3032 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 3033 ((nlevels - level - 1) * epbs)) || 3034 (fail_sparse && 3035 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 3036 /* the buffer has no parent yet */ 3037 return (SET_ERROR(ENOENT)); 3038 } else if (level < nlevels-1) { 3039 /* this block is referenced from an indirect block */ 3040 int err; 3041 3042 err = dbuf_hold_impl(dn, level + 1, 3043 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 3044 3045 if (err) 3046 return (err); 3047 err = dbuf_read(*parentp, NULL, 3048 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 3049 if (err) { 3050 dbuf_rele(*parentp, NULL); 3051 *parentp = NULL; 3052 return (err); 3053 } 3054 rw_enter(&(*parentp)->db_rwlock, RW_READER); 3055 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 3056 (blkid & ((1ULL << epbs) - 1)); 3057 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 3058 ASSERT(BP_IS_HOLE(*bpp)); 3059 rw_exit(&(*parentp)->db_rwlock); 3060 return (0); 3061 } else { 3062 /* the block is referenced from the dnode */ 3063 ASSERT3U(level, ==, nlevels-1); 3064 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 3065 blkid < dn->dn_phys->dn_nblkptr); 3066 if (dn->dn_dbuf) { 3067 dbuf_add_ref(dn->dn_dbuf, NULL); 3068 *parentp = dn->dn_dbuf; 3069 } 3070 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 3071 return (0); 3072 } 3073 } 3074 3075 static dmu_buf_impl_t * 3076 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 3077 dmu_buf_impl_t *parent, blkptr_t *blkptr) 3078 { 3079 objset_t *os = dn->dn_objset; 3080 dmu_buf_impl_t *db, *odb; 3081 3082 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3083 ASSERT(dn->dn_type != DMU_OT_NONE); 3084 3085 db = 
kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 3086 3087 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t), 3088 offsetof(dbuf_dirty_record_t, dr_dbuf_node)); 3089 3090 db->db_objset = os; 3091 db->db.db_object = dn->dn_object; 3092 db->db_level = level; 3093 db->db_blkid = blkid; 3094 db->db_dirtycnt = 0; 3095 db->db_dnode_handle = dn->dn_handle; 3096 db->db_parent = parent; 3097 db->db_blkptr = blkptr; 3098 3099 db->db_user = NULL; 3100 db->db_user_immediate_evict = FALSE; 3101 db->db_freed_in_flight = FALSE; 3102 db->db_pending_evict = FALSE; 3103 3104 if (blkid == DMU_BONUS_BLKID) { 3105 ASSERT3P(parent, ==, dn->dn_dbuf); 3106 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 3107 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 3108 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 3109 db->db.db_offset = DMU_BONUS_BLKID; 3110 db->db_state = DB_UNCACHED; 3111 DTRACE_SET_STATE(db, "bonus buffer created"); 3112 db->db_caching_status = DB_NO_CACHE; 3113 /* the bonus dbuf is not placed in the hash table */ 3114 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3115 return (db); 3116 } else if (blkid == DMU_SPILL_BLKID) { 3117 db->db.db_size = (blkptr != NULL) ? 3118 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 3119 db->db.db_offset = 0; 3120 } else { 3121 int blocksize = 3122 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 3123 db->db.db_size = blocksize; 3124 db->db.db_offset = db->db_blkid * blocksize; 3125 } 3126 3127 /* 3128 * Hold the dn_dbufs_mtx while we get the new dbuf 3129 * in the hash table *and* added to the dbufs list. 3130 * This prevents a possible deadlock with someone 3131 * trying to look up this dbuf before it's added to the 3132 * dn_dbufs list. 3133 */ 3134 mutex_enter(&dn->dn_dbufs_mtx); 3135 db->db_state = DB_EVICTING; /* not worth logging this state change */ 3136 if ((odb = dbuf_hash_insert(db)) != NULL) { 3137 /* someone else inserted it first */ 3138 mutex_exit(&dn->dn_dbufs_mtx); 3139 kmem_cache_free(dbuf_kmem_cache, db); 3140 DBUF_STAT_BUMP(hash_insert_race); 3141 return (odb); 3142 } 3143 avl_add(&dn->dn_dbufs, db); 3144 3145 db->db_state = DB_UNCACHED; 3146 DTRACE_SET_STATE(db, "regular buffer created"); 3147 db->db_caching_status = DB_NO_CACHE; 3148 mutex_exit(&dn->dn_dbufs_mtx); 3149 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3150 3151 if (parent && parent != dn->dn_dbuf) 3152 dbuf_add_ref(parent, db); 3153 3154 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 3155 zfs_refcount_count(&dn->dn_holds) > 0); 3156 (void) zfs_refcount_add(&dn->dn_holds, db); 3157 3158 dprintf_dbuf(db, "db=%p\n", db); 3159 3160 return (db); 3161 } 3162 3163 /* 3164 * This function returns a block pointer and information about the object, 3165 * given a dnode and a block. This is a publicly accessible version of 3166 * dbuf_findbp that only returns some information, rather than the 3167 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock 3168 * should be locked as (at least) a reader. 
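 *
 * A minimal usage sketch (illustrative only; "dn" and "blkid" stand for
 * a held dnode and a level-0 block id):
 *
 *	int error;
 *	blkptr_t bp;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	error = dbuf_dnode_findbp(dn, 0, blkid, &bp, NULL, NULL);
 *	rw_exit(&dn->dn_struct_rwlock);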
3169 */ 3170 int 3171 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid, 3172 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift) 3173 { 3174 dmu_buf_impl_t *dbp = NULL; 3175 blkptr_t *bp2; 3176 int err = 0; 3177 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3178 3179 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2); 3180 if (err == 0) { 3181 *bp = *bp2; 3182 if (dbp != NULL) 3183 dbuf_rele(dbp, NULL); 3184 if (datablkszsec != NULL) 3185 *datablkszsec = dn->dn_phys->dn_datablkszsec; 3186 if (indblkshift != NULL) 3187 *indblkshift = dn->dn_phys->dn_indblkshift; 3188 } 3189 3190 return (err); 3191 } 3192 3193 typedef struct dbuf_prefetch_arg { 3194 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 3195 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 3196 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 3197 int dpa_curlevel; /* The current level that we're reading */ 3198 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 3199 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 3200 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 3201 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 3202 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */ 3203 void *dpa_arg; /* prefetch completion arg */ 3204 } dbuf_prefetch_arg_t; 3205 3206 static void 3207 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done) 3208 { 3209 if (dpa->dpa_cb != NULL) { 3210 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level, 3211 dpa->dpa_zb.zb_blkid, io_done); 3212 } 3213 kmem_free(dpa, sizeof (*dpa)); 3214 } 3215 3216 static void 3217 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, 3218 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3219 { 3220 (void) zio, (void) zb, (void) iobp; 3221 dbuf_prefetch_arg_t *dpa = private; 3222 3223 if (abuf != NULL) 3224 arc_buf_destroy(abuf, private); 3225 3226 dbuf_prefetch_fini(dpa, B_TRUE); 3227 } 3228 3229 /* 3230 * Actually issue the prefetch read for the block given. 3231 */ 3232 static void 3233 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 3234 { 3235 ASSERT(!BP_IS_REDACTED(bp) || 3236 dsl_dataset_feature_is_active( 3237 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3238 SPA_FEATURE_REDACTED_DATASETS)); 3239 3240 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 3241 return (dbuf_prefetch_fini(dpa, B_FALSE)); 3242 3243 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 3244 arc_flags_t aflags = 3245 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | 3246 ARC_FLAG_NO_BUF; 3247 3248 /* dnodes are always read as raw and then converted later */ 3249 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 3250 dpa->dpa_curlevel == 0) 3251 zio_flags |= ZIO_FLAG_RAW; 3252 3253 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3254 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 3255 ASSERT(dpa->dpa_zio != NULL); 3256 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, 3257 dbuf_issue_final_prefetch_done, dpa, 3258 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 3259 } 3260 3261 /* 3262 * Called when an indirect block above our prefetch target is read in. This 3263 * will either read in the next indirect block down the tree or issue the actual 3264 * prefetch if the next block down is our target. 
3265 */ 3266 static void 3267 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 3268 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3269 { 3270 (void) zb, (void) iobp; 3271 dbuf_prefetch_arg_t *dpa = private; 3272 3273 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 3274 ASSERT3S(dpa->dpa_curlevel, >, 0); 3275 3276 if (abuf == NULL) { 3277 ASSERT(zio == NULL || zio->io_error != 0); 3278 dbuf_prefetch_fini(dpa, B_TRUE); 3279 return; 3280 } 3281 ASSERT(zio == NULL || zio->io_error == 0); 3282 3283 /* 3284 * The dpa_dnode is only valid if we are called with a NULL 3285 * zio. This indicates that the arc_read() returned without 3286 * first calling zio_read() to issue a physical read. Once 3287 * a physical read is made the dpa_dnode must be invalidated 3288 * as the locks guarding it may have been dropped. If the 3289 * dpa_dnode is still valid, then we want to add it to the dbuf 3290 * cache. To do so, we must hold the dbuf associated with the block 3291 * we just prefetched, read its contents so that we associate it 3292 * with an arc_buf_t, and then release it. 3293 */ 3294 if (zio != NULL) { 3295 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 3296 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 3297 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 3298 } else { 3299 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 3300 } 3301 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 3302 3303 dpa->dpa_dnode = NULL; 3304 } else if (dpa->dpa_dnode != NULL) { 3305 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 3306 (dpa->dpa_epbs * (dpa->dpa_curlevel - 3307 dpa->dpa_zb.zb_level)); 3308 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 3309 dpa->dpa_curlevel, curblkid, FTAG); 3310 if (db == NULL) { 3311 arc_buf_destroy(abuf, private); 3312 dbuf_prefetch_fini(dpa, B_TRUE); 3313 return; 3314 } 3315 (void) dbuf_read(db, NULL, 3316 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 3317 dbuf_rele(db, FTAG); 3318 } 3319 3320 dpa->dpa_curlevel--; 3321 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 3322 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 3323 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 3324 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 3325 3326 ASSERT(!BP_IS_REDACTED(bp) || 3327 dsl_dataset_feature_is_active( 3328 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3329 SPA_FEATURE_REDACTED_DATASETS)); 3330 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) { 3331 arc_buf_destroy(abuf, private); 3332 dbuf_prefetch_fini(dpa, B_TRUE); 3333 return; 3334 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 3335 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 3336 dbuf_issue_final_prefetch(dpa, bp); 3337 } else { 3338 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3339 zbookmark_phys_t zb; 3340 3341 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3342 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 3343 iter_aflags |= ARC_FLAG_L2CACHE; 3344 3345 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3346 3347 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 3348 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 3349 3350 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3351 bp, dbuf_prefetch_indirect_done, dpa, 3352 ZIO_PRIORITY_SYNC_READ, 3353 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3354 &iter_aflags, &zb); 3355 } 3356 3357 arc_buf_destroy(abuf, private); 3358 } 3359 3360 /* 3361 * Issue prefetch reads for the given block on the given level. If the indirect 3362 * blocks above that block are not in memory, we will read them in 3363 * asynchronously. 
As a result, this call never blocks waiting for a read to 3364 * complete. Note that the prefetch might fail if the dataset is encrypted and 3365 * the encryption key is unmapped before the IO completes. 3366 */ 3367 int 3368 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid, 3369 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb, 3370 void *arg) 3371 { 3372 blkptr_t bp; 3373 int epbs, nlevels, curlevel; 3374 uint64_t curblkid; 3375 3376 ASSERT(blkid != DMU_BONUS_BLKID); 3377 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3378 3379 if (blkid > dn->dn_maxblkid) 3380 goto no_issue; 3381 3382 if (level == 0 && dnode_block_freed(dn, blkid)) 3383 goto no_issue; 3384 3385 /* 3386 * This dnode hasn't been written to disk yet, so there's nothing to 3387 * prefetch. 3388 */ 3389 nlevels = dn->dn_phys->dn_nlevels; 3390 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 3391 goto no_issue; 3392 3393 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3394 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 3395 goto no_issue; 3396 3397 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 3398 level, blkid); 3399 if (db != NULL) { 3400 mutex_exit(&db->db_mtx); 3401 /* 3402 * This dbuf already exists. It is either CACHED, or 3403 * (we assume) about to be read or filled. 3404 */ 3405 goto no_issue; 3406 } 3407 3408 /* 3409 * Find the closest ancestor (indirect block) of the target block 3410 * that is present in the cache. In this indirect block, we will 3411 * find the bp that is at curlevel, curblkid. 3412 */ 3413 curlevel = level; 3414 curblkid = blkid; 3415 while (curlevel < nlevels - 1) { 3416 int parent_level = curlevel + 1; 3417 uint64_t parent_blkid = curblkid >> epbs; 3418 dmu_buf_impl_t *db; 3419 3420 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 3421 FALSE, TRUE, FTAG, &db) == 0) { 3422 blkptr_t *bpp = db->db_buf->b_data; 3423 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 3424 dbuf_rele(db, FTAG); 3425 break; 3426 } 3427 3428 curlevel = parent_level; 3429 curblkid = parent_blkid; 3430 } 3431 3432 if (curlevel == nlevels - 1) { 3433 /* No cached indirect blocks found. */ 3434 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 3435 bp = dn->dn_phys->dn_blkptr[curblkid]; 3436 } 3437 ASSERT(!BP_IS_REDACTED(&bp) || 3438 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset, 3439 SPA_FEATURE_REDACTED_DATASETS)); 3440 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp)) 3441 goto no_issue; 3442 3443 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 3444 3445 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 3446 ZIO_FLAG_CANFAIL); 3447 3448 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 3449 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 3450 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 3451 dn->dn_object, level, blkid); 3452 dpa->dpa_curlevel = curlevel; 3453 dpa->dpa_prio = prio; 3454 dpa->dpa_aflags = aflags; 3455 dpa->dpa_spa = dn->dn_objset->os_spa; 3456 dpa->dpa_dnode = dn; 3457 dpa->dpa_epbs = epbs; 3458 dpa->dpa_zio = pio; 3459 dpa->dpa_cb = cb; 3460 dpa->dpa_arg = arg; 3461 3462 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3463 if (dnode_level_is_l2cacheable(&bp, dn, level)) 3464 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 3465 3466 /* 3467 * If we have the indirect just above us, no need to do the asynchronous 3468 * prefetch chain; we'll just run the last step ourselves. 
If we're at 3469 * a higher level, though, we want to issue the prefetches for all the 3470 * indirect blocks asynchronously, so we can go on with whatever we were 3471 * doing. 3472 */ 3473 if (curlevel == level) { 3474 ASSERT3U(curblkid, ==, blkid); 3475 dbuf_issue_final_prefetch(dpa, &bp); 3476 } else { 3477 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3478 zbookmark_phys_t zb; 3479 3480 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3481 if (dnode_level_is_l2cacheable(&bp, dn, level)) 3482 iter_aflags |= ARC_FLAG_L2CACHE; 3483 3484 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 3485 dn->dn_object, curlevel, curblkid); 3486 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3487 &bp, dbuf_prefetch_indirect_done, dpa, 3488 ZIO_PRIORITY_SYNC_READ, 3489 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3490 &iter_aflags, &zb); 3491 } 3492 /* 3493 * We use pio here instead of dpa_zio since it's possible that 3494 * dpa may have already been freed. 3495 */ 3496 zio_nowait(pio); 3497 return (1); 3498 no_issue: 3499 if (cb != NULL) 3500 cb(arg, level, blkid, B_FALSE); 3501 return (0); 3502 } 3503 3504 int 3505 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 3506 arc_flags_t aflags) 3507 { 3508 3509 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL)); 3510 } 3511 3512 /* 3513 * Helper function for dbuf_hold_impl() to copy a buffer. Handles 3514 * the case of encrypted, compressed and uncompressed buffers by 3515 * allocating the new buffer, respectively, with arc_alloc_raw_buf(), 3516 * arc_alloc_compressed_buf() or arc_alloc_buf().* 3517 * 3518 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl(). 3519 */ 3520 noinline static void 3521 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db) 3522 { 3523 dbuf_dirty_record_t *dr = db->db_data_pending; 3524 arc_buf_t *data = dr->dt.dl.dr_data; 3525 enum zio_compress compress_type = arc_get_compression(data); 3526 uint8_t complevel = arc_get_complevel(data); 3527 3528 if (arc_is_encrypted(data)) { 3529 boolean_t byteorder; 3530 uint8_t salt[ZIO_DATA_SALT_LEN]; 3531 uint8_t iv[ZIO_DATA_IV_LEN]; 3532 uint8_t mac[ZIO_DATA_MAC_LEN]; 3533 3534 arc_get_raw_params(data, &byteorder, salt, iv, mac); 3535 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db, 3536 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, 3537 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), 3538 compress_type, complevel)); 3539 } else if (compress_type != ZIO_COMPRESS_OFF) { 3540 dbuf_set_data(db, arc_alloc_compressed_buf( 3541 dn->dn_objset->os_spa, db, arc_buf_size(data), 3542 arc_buf_lsize(data), compress_type, complevel)); 3543 } else { 3544 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db, 3545 DBUF_GET_BUFC_TYPE(db), db->db.db_size)); 3546 } 3547 3548 rw_enter(&db->db_rwlock, RW_WRITER); 3549 memcpy(db->db.db_data, data->b_data, arc_buf_size(data)); 3550 rw_exit(&db->db_rwlock); 3551 } 3552 3553 /* 3554 * Returns with db_holds incremented, and db_mtx not held. 3555 * Note: dn_struct_rwlock must be held. 
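 *
 * A minimal usage sketch (illustrative only; "dn" stands for a held
 * dnode and "blkid" for a level-0 block id):
 *
 *	dmu_buf_impl_t *db;
 *	int error;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	error = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (error == 0)
 *		dbuf_rele(db, FTAG);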
3556 */ 3557 int 3558 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 3559 boolean_t fail_sparse, boolean_t fail_uncached, 3560 const void *tag, dmu_buf_impl_t **dbp) 3561 { 3562 dmu_buf_impl_t *db, *parent = NULL; 3563 3564 /* If the pool has been created, verify the tx_sync_lock is not held */ 3565 spa_t *spa = dn->dn_objset->os_spa; 3566 dsl_pool_t *dp = spa->spa_dsl_pool; 3567 if (dp != NULL) { 3568 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock)); 3569 } 3570 3571 ASSERT(blkid != DMU_BONUS_BLKID); 3572 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3573 ASSERT3U(dn->dn_nlevels, >, level); 3574 3575 *dbp = NULL; 3576 3577 /* dbuf_find() returns with db_mtx held */ 3578 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 3579 3580 if (db == NULL) { 3581 blkptr_t *bp = NULL; 3582 int err; 3583 3584 if (fail_uncached) 3585 return (SET_ERROR(ENOENT)); 3586 3587 ASSERT3P(parent, ==, NULL); 3588 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 3589 if (fail_sparse) { 3590 if (err == 0 && bp && BP_IS_HOLE(bp)) 3591 err = SET_ERROR(ENOENT); 3592 if (err) { 3593 if (parent) 3594 dbuf_rele(parent, NULL); 3595 return (err); 3596 } 3597 } 3598 if (err && err != ENOENT) 3599 return (err); 3600 db = dbuf_create(dn, level, blkid, parent, bp); 3601 } 3602 3603 if (fail_uncached && db->db_state != DB_CACHED) { 3604 mutex_exit(&db->db_mtx); 3605 return (SET_ERROR(ENOENT)); 3606 } 3607 3608 if (db->db_buf != NULL) { 3609 arc_buf_access(db->db_buf); 3610 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 3611 } 3612 3613 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 3614 3615 /* 3616 * If this buffer is currently syncing out, and we are 3617 * still referencing it from db_data, we need to make a copy 3618 * of it in case we decide we want to dirty it again in this txg. 
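 * (dbuf_hold_copy() performs that copy: it allocates a new ARC buffer with
 * the same compression and encryption parameters, copies the data into it,
 * and points the dbuf at the copy, while the dirty record keeps the
 * original buffer for the write that is already in flight.)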
3619 */ 3620 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 3621 dn->dn_object != DMU_META_DNODE_OBJECT && 3622 db->db_state == DB_CACHED && db->db_data_pending) { 3623 dbuf_dirty_record_t *dr = db->db_data_pending; 3624 if (dr->dt.dl.dr_data == db->db_buf) 3625 dbuf_hold_copy(dn, db); 3626 } 3627 3628 if (multilist_link_active(&db->db_cache_link)) { 3629 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3630 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 3631 db->db_caching_status == DB_DBUF_METADATA_CACHE); 3632 3633 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 3634 (void) zfs_refcount_remove_many( 3635 &dbuf_caches[db->db_caching_status].size, 3636 db->db.db_size, db); 3637 3638 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 3639 DBUF_STAT_BUMPDOWN(metadata_cache_count); 3640 } else { 3641 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 3642 DBUF_STAT_BUMPDOWN(cache_count); 3643 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 3644 db->db.db_size); 3645 } 3646 db->db_caching_status = DB_NO_CACHE; 3647 } 3648 (void) zfs_refcount_add(&db->db_holds, tag); 3649 DBUF_VERIFY(db); 3650 mutex_exit(&db->db_mtx); 3651 3652 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 3653 if (parent) 3654 dbuf_rele(parent, NULL); 3655 3656 ASSERT3P(DB_DNODE(db), ==, dn); 3657 ASSERT3U(db->db_blkid, ==, blkid); 3658 ASSERT3U(db->db_level, ==, level); 3659 *dbp = db; 3660 3661 return (0); 3662 } 3663 3664 dmu_buf_impl_t * 3665 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag) 3666 { 3667 return (dbuf_hold_level(dn, 0, blkid, tag)); 3668 } 3669 3670 dmu_buf_impl_t * 3671 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag) 3672 { 3673 dmu_buf_impl_t *db; 3674 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 3675 return (err ? 
NULL : db); 3676 } 3677 3678 void 3679 dbuf_create_bonus(dnode_t *dn) 3680 { 3681 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 3682 3683 ASSERT(dn->dn_bonus == NULL); 3684 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 3685 } 3686 3687 int 3688 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 3689 { 3690 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3691 3692 if (db->db_blkid != DMU_SPILL_BLKID) 3693 return (SET_ERROR(ENOTSUP)); 3694 if (blksz == 0) 3695 blksz = SPA_MINBLOCKSIZE; 3696 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 3697 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 3698 3699 dbuf_new_size(db, blksz, tx); 3700 3701 return (0); 3702 } 3703 3704 void 3705 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 3706 { 3707 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 3708 } 3709 3710 #pragma weak dmu_buf_add_ref = dbuf_add_ref 3711 void 3712 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag) 3713 { 3714 int64_t holds = zfs_refcount_add(&db->db_holds, tag); 3715 VERIFY3S(holds, >, 1); 3716 } 3717 3718 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 3719 boolean_t 3720 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 3721 const void *tag) 3722 { 3723 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3724 dmu_buf_impl_t *found_db; 3725 boolean_t result = B_FALSE; 3726 3727 if (blkid == DMU_BONUS_BLKID) 3728 found_db = dbuf_find_bonus(os, obj); 3729 else 3730 found_db = dbuf_find(os, obj, 0, blkid); 3731 3732 if (found_db != NULL) { 3733 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 3734 (void) zfs_refcount_add(&db->db_holds, tag); 3735 result = B_TRUE; 3736 } 3737 mutex_exit(&found_db->db_mtx); 3738 } 3739 return (result); 3740 } 3741 3742 /* 3743 * If you call dbuf_rele() you had better not be referencing the dnode handle 3744 * unless you have some other direct or indirect hold on the dnode. (An indirect 3745 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 3746 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 3747 * dnode's parent dbuf evicting its dnode handles. 3748 */ 3749 void 3750 dbuf_rele(dmu_buf_impl_t *db, const void *tag) 3751 { 3752 mutex_enter(&db->db_mtx); 3753 dbuf_rele_and_unlock(db, tag, B_FALSE); 3754 } 3755 3756 void 3757 dmu_buf_rele(dmu_buf_t *db, const void *tag) 3758 { 3759 dbuf_rele((dmu_buf_impl_t *)db, tag); 3760 } 3761 3762 /* 3763 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 3764 * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 3765 * argument should be set if we are already in the dbuf-evicting code 3766 * path, in which case we don't want to recursively evict. This allows us to 3767 * avoid deeply nested stacks that would have a call flow similar to this: 3768 * 3769 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 3770 * ^ | 3771 * | | 3772 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 3773 * 3774 */ 3775 void 3776 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting) 3777 { 3778 int64_t holds; 3779 uint64_t size; 3780 3781 ASSERT(MUTEX_HELD(&db->db_mtx)); 3782 DBUF_VERIFY(db); 3783 3784 /* 3785 * Remove the reference to the dbuf before removing its hold on the 3786 * dnode so we can guarantee in dnode_move() that a referenced bonus 3787 * buffer has a corresponding dnode hold. 
3788 */ 3789 holds = zfs_refcount_remove(&db->db_holds, tag); 3790 ASSERT(holds >= 0); 3791 3792 /* 3793 * We can't freeze indirects if there is a possibility that they 3794 * may be modified in the current syncing context. 3795 */ 3796 if (db->db_buf != NULL && 3797 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 3798 arc_buf_freeze(db->db_buf); 3799 } 3800 3801 if (holds == db->db_dirtycnt && 3802 db->db_level == 0 && db->db_user_immediate_evict) 3803 dbuf_evict_user(db); 3804 3805 if (holds == 0) { 3806 if (db->db_blkid == DMU_BONUS_BLKID) { 3807 dnode_t *dn; 3808 boolean_t evict_dbuf = db->db_pending_evict; 3809 3810 /* 3811 * If the dnode moves here, we cannot cross this 3812 * barrier until the move completes. 3813 */ 3814 DB_DNODE_ENTER(db); 3815 3816 dn = DB_DNODE(db); 3817 atomic_dec_32(&dn->dn_dbufs_count); 3818 3819 /* 3820 * Decrementing the dbuf count means that the bonus 3821 * buffer's dnode hold is no longer discounted in 3822 * dnode_move(). The dnode cannot move until after 3823 * the dnode_rele() below. 3824 */ 3825 DB_DNODE_EXIT(db); 3826 3827 /* 3828 * Do not reference db after its lock is dropped. 3829 * Another thread may evict it. 3830 */ 3831 mutex_exit(&db->db_mtx); 3832 3833 if (evict_dbuf) 3834 dnode_evict_bonus(dn); 3835 3836 dnode_rele(dn, db); 3837 } else if (db->db_buf == NULL) { 3838 /* 3839 * This is a special case: we never associated this 3840 * dbuf with any data allocated from the ARC. 3841 */ 3842 ASSERT(db->db_state == DB_UNCACHED || 3843 db->db_state == DB_NOFILL); 3844 dbuf_destroy(db); 3845 } else if (arc_released(db->db_buf)) { 3846 /* 3847 * This dbuf has anonymous data associated with it. 3848 */ 3849 dbuf_destroy(db); 3850 } else { 3851 boolean_t do_arc_evict = B_FALSE; 3852 blkptr_t bp; 3853 spa_t *spa = dmu_objset_spa(db->db_objset); 3854 3855 if (!DBUF_IS_CACHEABLE(db) && 3856 db->db_blkptr != NULL && 3857 !BP_IS_HOLE(db->db_blkptr) && 3858 !BP_IS_EMBEDDED(db->db_blkptr)) { 3859 do_arc_evict = B_TRUE; 3860 bp = *db->db_blkptr; 3861 } 3862 3863 if (!DBUF_IS_CACHEABLE(db) || 3864 db->db_pending_evict) { 3865 dbuf_destroy(db); 3866 } else if (!multilist_link_active(&db->db_cache_link)) { 3867 ASSERT3U(db->db_caching_status, ==, 3868 DB_NO_CACHE); 3869 3870 dbuf_cached_state_t dcs = 3871 dbuf_include_in_metadata_cache(db) ? 
3872 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 3873 db->db_caching_status = dcs; 3874 3875 multilist_insert(&dbuf_caches[dcs].cache, db); 3876 uint64_t db_size = db->db.db_size; 3877 size = zfs_refcount_add_many( 3878 &dbuf_caches[dcs].size, db_size, db); 3879 uint8_t db_level = db->db_level; 3880 mutex_exit(&db->db_mtx); 3881 3882 if (dcs == DB_DBUF_METADATA_CACHE) { 3883 DBUF_STAT_BUMP(metadata_cache_count); 3884 DBUF_STAT_MAX( 3885 metadata_cache_size_bytes_max, 3886 size); 3887 } else { 3888 DBUF_STAT_BUMP(cache_count); 3889 DBUF_STAT_MAX(cache_size_bytes_max, 3890 size); 3891 DBUF_STAT_BUMP(cache_levels[db_level]); 3892 DBUF_STAT_INCR( 3893 cache_levels_bytes[db_level], 3894 db_size); 3895 } 3896 3897 if (dcs == DB_DBUF_CACHE && !evicting) 3898 dbuf_evict_notify(size); 3899 } 3900 3901 if (do_arc_evict) 3902 arc_freed(spa, &bp); 3903 } 3904 } else { 3905 mutex_exit(&db->db_mtx); 3906 } 3907 3908 } 3909 3910 #pragma weak dmu_buf_refcount = dbuf_refcount 3911 uint64_t 3912 dbuf_refcount(dmu_buf_impl_t *db) 3913 { 3914 return (zfs_refcount_count(&db->db_holds)); 3915 } 3916 3917 uint64_t 3918 dmu_buf_user_refcount(dmu_buf_t *db_fake) 3919 { 3920 uint64_t holds; 3921 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3922 3923 mutex_enter(&db->db_mtx); 3924 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); 3925 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; 3926 mutex_exit(&db->db_mtx); 3927 3928 return (holds); 3929 } 3930 3931 void * 3932 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 3933 dmu_buf_user_t *new_user) 3934 { 3935 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3936 3937 mutex_enter(&db->db_mtx); 3938 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3939 if (db->db_user == old_user) 3940 db->db_user = new_user; 3941 else 3942 old_user = db->db_user; 3943 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3944 mutex_exit(&db->db_mtx); 3945 3946 return (old_user); 3947 } 3948 3949 void * 3950 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3951 { 3952 return (dmu_buf_replace_user(db_fake, NULL, user)); 3953 } 3954 3955 void * 3956 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3957 { 3958 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3959 3960 db->db_user_immediate_evict = TRUE; 3961 return (dmu_buf_set_user(db_fake, user)); 3962 } 3963 3964 void * 3965 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3966 { 3967 return (dmu_buf_replace_user(db_fake, user, NULL)); 3968 } 3969 3970 void * 3971 dmu_buf_get_user(dmu_buf_t *db_fake) 3972 { 3973 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3974 3975 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3976 return (db->db_user); 3977 } 3978 3979 void 3980 dmu_buf_user_evict_wait(void) 3981 { 3982 taskq_wait(dbu_evict_taskq); 3983 } 3984 3985 blkptr_t * 3986 dmu_buf_get_blkptr(dmu_buf_t *db) 3987 { 3988 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3989 return (dbi->db_blkptr); 3990 } 3991 3992 objset_t * 3993 dmu_buf_get_objset(dmu_buf_t *db) 3994 { 3995 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3996 return (dbi->db_objset); 3997 } 3998 3999 dnode_t * 4000 dmu_buf_dnode_enter(dmu_buf_t *db) 4001 { 4002 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4003 DB_DNODE_ENTER(dbi); 4004 return (DB_DNODE(dbi)); 4005 } 4006 4007 void 4008 dmu_buf_dnode_exit(dmu_buf_t *db) 4009 { 4010 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4011 DB_DNODE_EXIT(dbi); 4012 } 4013 4014 static void 4015 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 4016 { 4017 /* ASSERT(dmu_tx_is_syncing(tx) */ 4018 
ASSERT(MUTEX_HELD(&db->db_mtx));
4019
4020 if (db->db_blkptr != NULL)
4021 return;
4022
4023 if (db->db_blkid == DMU_SPILL_BLKID) {
4024 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4025 BP_ZERO(db->db_blkptr);
4026 return;
4027 }
4028 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4029 /*
4030 * This buffer was allocated at a time when there were
4031 * no available blkptrs from the dnode, or it was
4032 * inappropriate to hook it in (i.e., nlevels mismatch).
4033 */
4034 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4035 ASSERT(db->db_parent == NULL);
4036 db->db_parent = dn->dn_dbuf;
4037 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4038 DBUF_VERIFY(db);
4039 } else {
4040 dmu_buf_impl_t *parent = db->db_parent;
4041 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4042
4043 ASSERT(dn->dn_phys->dn_nlevels > 1);
4044 if (parent == NULL) {
4045 mutex_exit(&db->db_mtx);
4046 rw_enter(&dn->dn_struct_rwlock, RW_READER);
4047 parent = dbuf_hold_level(dn, db->db_level + 1,
4048 db->db_blkid >> epbs, db);
4049 rw_exit(&dn->dn_struct_rwlock);
4050 mutex_enter(&db->db_mtx);
4051 db->db_parent = parent;
4052 }
4053 db->db_blkptr = (blkptr_t *)parent->db.db_data +
4054 (db->db_blkid & ((1ULL << epbs) - 1));
4055 DBUF_VERIFY(db);
4056 }
4057 }
4058
4059 static void
4060 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4061 {
4062 dmu_buf_impl_t *db = dr->dr_dbuf;
4063 void *data = dr->dt.dl.dr_data;
4064
4065 ASSERT0(db->db_level);
4066 ASSERT(MUTEX_HELD(&db->db_mtx));
4067 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4068 ASSERT(data != NULL);
4069
4070 dnode_t *dn = dr->dr_dnode;
4071 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4072 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4073 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4074
4075 dbuf_sync_leaf_verify_bonus_dnode(dr);
4076
4077 dbuf_undirty_bonus(dr);
4078 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4079 }
4080
4081 /*
4082 * When syncing out a block of dnodes, adjust the block to deal with
4083 * encryption. Normally, we make sure the block is decrypted before writing
4084 * it. If we have crypt params, then we are writing a raw (encrypted) block,
4085 * from a raw receive. In this case, set the ARC buf's crypt params so
4086 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4087 */
4088 static void
4089 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4090 {
4091 int err;
4092 dmu_buf_impl_t *db = dr->dr_dbuf;
4093
4094 ASSERT(MUTEX_HELD(&db->db_mtx));
4095 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4096 ASSERT3U(db->db_level, ==, 0);
4097
4098 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4099 zbookmark_phys_t zb;
4100
4101 /*
4102 * Unfortunately, there is currently no mechanism for
4103 * syncing context to handle decryption errors. An error
4104 * here is only possible if an attacker maliciously
4105 * changed a dnode block and updated the associated
4106 * checksums going up the block tree.
4107 */
4108 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4109 db->db.db_object, db->db_level, db->db_blkid);
4110 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4111 &zb, B_TRUE);
4112 if (err)
4113 panic("Invalid dnode block MAC");
4114 } else if (dr->dt.dl.dr_has_raw_params) {
4115 (void) arc_release(dr->dt.dl.dr_data, db);
4116 arc_convert_to_raw(dr->dt.dl.dr_data,
4117 dmu_objset_id(db->db_objset),
4118 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4119 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4120 }
4121 }
4122
4123 /*
4124 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4125 * is critical that we not allow the compiler to inline this function into
4126 * dbuf_sync_list() thereby drastically bloating the stack usage.
4127 */
4128 noinline static void
4129 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4130 {
4131 dmu_buf_impl_t *db = dr->dr_dbuf;
4132 dnode_t *dn = dr->dr_dnode;
4133
4134 ASSERT(dmu_tx_is_syncing(tx));
4135
4136 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4137
4138 mutex_enter(&db->db_mtx);
4139
4140 ASSERT(db->db_level > 0);
4141 DBUF_VERIFY(db);
4142
4143 /* Read the block if it hasn't been read yet. */
4144 if (db->db_buf == NULL) {
4145 mutex_exit(&db->db_mtx);
4146 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4147 mutex_enter(&db->db_mtx);
4148 }
4149 ASSERT3U(db->db_state, ==, DB_CACHED);
4150 ASSERT(db->db_buf != NULL);
4151
4152 /* Indirect block size must match what the dnode thinks it is. */
4153 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4154 dbuf_check_blkptr(dn, db);
4155
4156 /* Provide the pending dirty record to child dbufs */
4157 db->db_data_pending = dr;
4158
4159 mutex_exit(&db->db_mtx);
4160
4161 dbuf_write(dr, db->db_buf, tx);
4162
4163 zio_t *zio = dr->dr_zio;
4164 mutex_enter(&dr->dt.di.dr_mtx);
4165 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4166 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4167 mutex_exit(&dr->dt.di.dr_mtx);
4168 zio_nowait(zio);
4169 }
4170
4171 /*
4172 * Verify that the size of the data in our bonus buffer does not exceed
4173 * its recorded size.
4174 *
4175 * The purpose of this verification is to catch any cases in development
4176 * where the size of a phys structure (e.g., space_map_phys_t) grows and,
4177 * due to incorrect feature management, older pools expect to read more
4178 * data even though they didn't actually write it to begin with.
4179 *
4180 * For example, this would catch an error in the feature logic where we
4181 * open an older pool and we expect to write the space map histogram of
4182 * a space map with size SPACE_MAP_SIZE_V0.
4183 */
4184 static void
4185 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4186 {
4187 #ifdef ZFS_DEBUG
4188 dnode_t *dn = dr->dr_dnode;
4189
4190 /*
4191 * Encrypted bonus buffers can have data past their bonuslen.
4192 * Skip the verification of these blocks.
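 * For unencrypted dnodes, the loop below simply asserts that every byte
 * of the dirty bonus data in the range [dn_bonuslen, maxbonuslen) is zero.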
4193 */ 4194 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)) 4195 return; 4196 4197 uint16_t bonuslen = dn->dn_phys->dn_bonuslen; 4198 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 4199 ASSERT3U(bonuslen, <=, maxbonuslen); 4200 4201 arc_buf_t *datap = dr->dt.dl.dr_data; 4202 char *datap_end = ((char *)datap) + bonuslen; 4203 char *datap_max = ((char *)datap) + maxbonuslen; 4204 4205 /* ensure that everything is zero after our data */ 4206 for (; datap_end < datap_max; datap_end++) 4207 ASSERT(*datap_end == 0); 4208 #endif 4209 } 4210 4211 static blkptr_t * 4212 dbuf_lightweight_bp(dbuf_dirty_record_t *dr) 4213 { 4214 /* This must be a lightweight dirty record. */ 4215 ASSERT3P(dr->dr_dbuf, ==, NULL); 4216 dnode_t *dn = dr->dr_dnode; 4217 4218 if (dn->dn_phys->dn_nlevels == 1) { 4219 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr); 4220 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]); 4221 } else { 4222 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf; 4223 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 4224 VERIFY3U(parent_db->db_level, ==, 1); 4225 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn); 4226 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid); 4227 blkptr_t *bp = parent_db->db.db_data; 4228 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]); 4229 } 4230 } 4231 4232 static void 4233 dbuf_lightweight_ready(zio_t *zio) 4234 { 4235 dbuf_dirty_record_t *dr = zio->io_private; 4236 blkptr_t *bp = zio->io_bp; 4237 4238 if (zio->io_error != 0) 4239 return; 4240 4241 dnode_t *dn = dr->dr_dnode; 4242 4243 blkptr_t *bp_orig = dbuf_lightweight_bp(dr); 4244 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4245 int64_t delta = bp_get_dsize_sync(spa, bp) - 4246 bp_get_dsize_sync(spa, bp_orig); 4247 dnode_diduse_space(dn, delta); 4248 4249 uint64_t blkid = dr->dt.dll.dr_blkid; 4250 mutex_enter(&dn->dn_mtx); 4251 if (blkid > dn->dn_phys->dn_maxblkid) { 4252 ASSERT0(dn->dn_objset->os_raw_receive); 4253 dn->dn_phys->dn_maxblkid = blkid; 4254 } 4255 mutex_exit(&dn->dn_mtx); 4256 4257 if (!BP_IS_EMBEDDED(bp)) { 4258 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1; 4259 BP_SET_FILL(bp, fill); 4260 } 4261 4262 dmu_buf_impl_t *parent_db; 4263 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1); 4264 if (dr->dr_parent == NULL) { 4265 parent_db = dn->dn_dbuf; 4266 } else { 4267 parent_db = dr->dr_parent->dr_dbuf; 4268 } 4269 rw_enter(&parent_db->db_rwlock, RW_WRITER); 4270 *bp_orig = *bp; 4271 rw_exit(&parent_db->db_rwlock); 4272 } 4273 4274 static void 4275 dbuf_lightweight_physdone(zio_t *zio) 4276 { 4277 dbuf_dirty_record_t *dr = zio->io_private; 4278 dsl_pool_t *dp = spa_get_dsl(zio->io_spa); 4279 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 4280 4281 /* 4282 * The callback will be called io_phys_children times. Retire one 4283 * portion of our dirty space each time we are called. Any rounding 4284 * error will be cleaned up by dbuf_lightweight_done(). 
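 * For example (illustrative numbers only): with dr_accounted == 100 and
 * io_phys_children == 3, each of the three calls here retires
 * 100 / 3 == 33 units, and dbuf_lightweight_done() retires the remaining
 * 100 % 3 == 1.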
4285 */
4286 int delta = dr->dr_accounted / zio->io_phys_children;
4287 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4288 }
4289
4290 static void
4291 dbuf_lightweight_done(zio_t *zio)
4292 {
4293 dbuf_dirty_record_t *dr = zio->io_private;
4294
4295 VERIFY0(zio->io_error);
4296
4297 objset_t *os = dr->dr_dnode->dn_objset;
4298 dmu_tx_t *tx = os->os_synctx;
4299
4300 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4301 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4302 } else {
4303 dsl_dataset_t *ds = os->os_dsl_dataset;
4304 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4305 dsl_dataset_block_born(ds, zio->io_bp, tx);
4306 }
4307
4308 /*
4309 * See comment in dbuf_write_done().
4310 */
4311 if (zio->io_phys_children == 0) {
4312 dsl_pool_undirty_space(dmu_objset_pool(os),
4313 dr->dr_accounted, zio->io_txg);
4314 } else {
4315 dsl_pool_undirty_space(dmu_objset_pool(os),
4316 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4317 }
4318
4319 abd_free(dr->dt.dll.dr_abd);
4320 kmem_free(dr, sizeof (*dr));
4321 }
4322
4323 noinline static void
4324 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4325 {
4326 dnode_t *dn = dr->dr_dnode;
4327 zio_t *pio;
4328 if (dn->dn_phys->dn_nlevels == 1) {
4329 pio = dn->dn_zio;
4330 } else {
4331 pio = dr->dr_parent->dr_zio;
4332 }
4333
4334 zbookmark_phys_t zb = {
4335 .zb_objset = dmu_objset_id(dn->dn_objset),
4336 .zb_object = dn->dn_object,
4337 .zb_level = 0,
4338 .zb_blkid = dr->dt.dll.dr_blkid,
4339 };
4340
4341 /*
4342 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4343 * will have the old BP in dbuf_lightweight_done().
4344 */
4345 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4346
4347 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4348 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4349 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4350 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4351 dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4352 ZIO_PRIORITY_ASYNC_WRITE,
4353 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4354
4355 zio_nowait(dr->dr_zio);
4356 }
4357
4358 /*
4359 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4360 * critical that we not allow the compiler to inline this function into
4361 * dbuf_sync_list() thereby drastically bloating the stack usage.
4362 */
4363 noinline static void
4364 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4365 {
4366 arc_buf_t **datap = &dr->dt.dl.dr_data;
4367 dmu_buf_impl_t *db = dr->dr_dbuf;
4368 dnode_t *dn = dr->dr_dnode;
4369 objset_t *os;
4370 uint64_t txg = tx->tx_txg;
4371
4372 ASSERT(dmu_tx_is_syncing(tx));
4373
4374 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4375
4376 mutex_enter(&db->db_mtx);
4377 /*
4378 * To be synced, we must be dirtied. But we
4379 * might have been freed after the dirty.
4380 */
4381 if (db->db_state == DB_UNCACHED) {
4382 /* This buffer has been freed since it was dirtied */
4383 ASSERT(db->db.db_data == NULL);
4384 } else if (db->db_state == DB_FILL) {
4385 /* This buffer was freed and is now being re-filled */
4386 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4387 } else {
4388 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4389 }
4390 DBUF_VERIFY(db);
4391
4392 if (db->db_blkid == DMU_SPILL_BLKID) {
4393 mutex_enter(&dn->dn_mtx);
4394 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4395 /*
4396 * In the previous transaction group, the bonus buffer
4397 * was entirely used to store the attributes for the
4398 * dnode which overrode the dn_spill field. However,
4399 * when adding more attributes to the file a spill
4400 * block was required to hold the extra attributes.
4401 *
4402 * Make sure to clear the garbage left in the dn_spill
4403 * field from the previous attributes in the bonus
4404 * buffer. Otherwise, after writing out the spill
4405 * block to the newly allocated dva, it will free
4406 * the old block pointed to by the invalid dn_spill.
4407 */
4408 db->db_blkptr = NULL;
4409 }
4410 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4411 mutex_exit(&dn->dn_mtx);
4412 }
4413
4414 /*
4415 * If this is a bonus buffer, simply copy the bonus data into the
4416 * dnode. It will be written out when the dnode is synced (and it
4417 * will be synced, since it must have been dirty for dbuf_sync to
4418 * be called).
4419 */
4420 if (db->db_blkid == DMU_BONUS_BLKID) {
4421 ASSERT(dr->dr_dbuf == db);
4422 dbuf_sync_bonus(dr, tx);
4423 return;
4424 }
4425
4426 os = dn->dn_objset;
4427
4428 /*
4429 * This function may have dropped the db_mtx lock allowing a dmu_sync
4430 * operation to sneak in. As a result, we need to ensure that we
4431 * don't check the dr_override_state until we have returned from
4432 * dbuf_check_blkptr.
4433 */
4434 dbuf_check_blkptr(dn, db);
4435
4436 /*
4437 * If this buffer is in the middle of an immediate write,
4438 * wait for the synchronous IO to complete.
4439 */
4440 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4441 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4442 cv_wait(&db->db_changed, &db->db_mtx);
4443 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
4444 }
4445
4446 /*
4447 * If this is a dnode block, ensure it is appropriately encrypted
4448 * or decrypted, depending on what we are writing to it this txg.
4449 */
4450 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4451 dbuf_prepare_encrypted_dnode_leaf(dr);
4452
4453 if (db->db_state != DB_NOFILL &&
4454 dn->dn_object != DMU_META_DNODE_OBJECT &&
4455 zfs_refcount_count(&db->db_holds) > 1 &&
4456 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4457 *datap == db->db_buf) {
4458 /*
4459 * If this buffer is currently "in use" (i.e., there
4460 * are active holds and db_data still references it),
4461 * then make a copy before we start the write so that
4462 * any modifications from the open txg will not leak
4463 * into this write.
4464 *
4465 * NOTE: this copy does not need to be made for
4466 * objects only modified in the syncing context (e.g.
4467 * DNODE blocks).
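 *
 * The copy allocated below mirrors the existing ARC buffer's compression
 * and encryption parameters; it becomes dr_data for this sync, while
 * db_buf stays behind for the open txg to modify.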
4468 */ 4469 int psize = arc_buf_size(*datap); 4470 int lsize = arc_buf_lsize(*datap); 4471 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 4472 enum zio_compress compress_type = arc_get_compression(*datap); 4473 uint8_t complevel = arc_get_complevel(*datap); 4474 4475 if (arc_is_encrypted(*datap)) { 4476 boolean_t byteorder; 4477 uint8_t salt[ZIO_DATA_SALT_LEN]; 4478 uint8_t iv[ZIO_DATA_IV_LEN]; 4479 uint8_t mac[ZIO_DATA_MAC_LEN]; 4480 4481 arc_get_raw_params(*datap, &byteorder, salt, iv, mac); 4482 *datap = arc_alloc_raw_buf(os->os_spa, db, 4483 dmu_objset_id(os), byteorder, salt, iv, mac, 4484 dn->dn_type, psize, lsize, compress_type, 4485 complevel); 4486 } else if (compress_type != ZIO_COMPRESS_OFF) { 4487 ASSERT3U(type, ==, ARC_BUFC_DATA); 4488 *datap = arc_alloc_compressed_buf(os->os_spa, db, 4489 psize, lsize, compress_type, complevel); 4490 } else { 4491 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 4492 } 4493 memcpy((*datap)->b_data, db->db.db_data, psize); 4494 } 4495 db->db_data_pending = dr; 4496 4497 mutex_exit(&db->db_mtx); 4498 4499 dbuf_write(dr, *datap, tx); 4500 4501 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4502 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 4503 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr); 4504 } else { 4505 zio_nowait(dr->dr_zio); 4506 } 4507 } 4508 4509 void 4510 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 4511 { 4512 dbuf_dirty_record_t *dr; 4513 4514 while ((dr = list_head(list))) { 4515 if (dr->dr_zio != NULL) { 4516 /* 4517 * If we find an already initialized zio then we 4518 * are processing the meta-dnode, and we have finished. 4519 * The dbufs for all dnodes are put back on the list 4520 * during processing, so that we can zio_wait() 4521 * these IOs after initiating all child IOs. 
4522 */
4523 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4524 DMU_META_DNODE_OBJECT);
4525 break;
4526 }
4527 list_remove(list, dr);
4528 if (dr->dr_dbuf == NULL) {
4529 dbuf_sync_lightweight(dr, tx);
4530 } else {
4531 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4532 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4533 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4534 }
4535 if (dr->dr_dbuf->db_level > 0)
4536 dbuf_sync_indirect(dr, tx);
4537 else
4538 dbuf_sync_leaf(dr, tx);
4539 }
4540 }
4541 }
4542
4543 static void
4544 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4545 {
4546 (void) buf;
4547 dmu_buf_impl_t *db = vdb;
4548 dnode_t *dn;
4549 blkptr_t *bp = zio->io_bp;
4550 blkptr_t *bp_orig = &zio->io_bp_orig;
4551 spa_t *spa = zio->io_spa;
4552 int64_t delta;
4553 uint64_t fill = 0;
4554 int i;
4555
4556 ASSERT3P(db->db_blkptr, !=, NULL);
4557 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4558
4559 DB_DNODE_ENTER(db);
4560 dn = DB_DNODE(db);
4561 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4562 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4563 zio->io_prev_space_delta = delta;
4564
4565 if (bp->blk_birth != 0) {
4566 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4567 BP_GET_TYPE(bp) == dn->dn_type) ||
4568 (db->db_blkid == DMU_SPILL_BLKID &&
4569 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4570 BP_IS_EMBEDDED(bp));
4571 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4572 }
4573
4574 mutex_enter(&db->db_mtx);
4575
4576 #ifdef ZFS_DEBUG
4577 if (db->db_blkid == DMU_SPILL_BLKID) {
4578 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4579 ASSERT(!(BP_IS_HOLE(bp)) &&
4580 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4581 }
4582 #endif
4583
4584 if (db->db_level == 0) {
4585 mutex_enter(&dn->dn_mtx);
4586 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4587 db->db_blkid != DMU_SPILL_BLKID) {
4588 ASSERT0(db->db_objset->os_raw_receive);
4589 dn->dn_phys->dn_maxblkid = db->db_blkid;
4590 }
4591 mutex_exit(&dn->dn_mtx);
4592
4593 if (dn->dn_type == DMU_OT_DNODE) {
4594 i = 0;
4595 while (i < db->db.db_size) {
4596 dnode_phys_t *dnp =
4597 (void *)(((char *)db->db.db_data) + i);
4598
4599 i += DNODE_MIN_SIZE;
4600 if (dnp->dn_type != DMU_OT_NONE) {
4601 fill++;
4602 i += dnp->dn_extra_slots *
4603 DNODE_MIN_SIZE;
4604 }
4605 }
4606 } else {
4607 if (BP_IS_HOLE(bp)) {
4608 fill = 0;
4609 } else {
4610 fill = 1;
4611 }
4612 }
4613 } else {
4614 blkptr_t *ibp = db->db.db_data;
4615 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4616 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4617 if (BP_IS_HOLE(ibp))
4618 continue;
4619 fill += BP_GET_FILL(ibp);
4620 }
4621 }
4622 DB_DNODE_EXIT(db);
4623
4624 if (!BP_IS_EMBEDDED(bp))
4625 BP_SET_FILL(bp, fill);
4626
4627 mutex_exit(&db->db_mtx);
4628
4629 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4630 *db->db_blkptr = *bp;
4631 dmu_buf_unlock_parent(db, dblt, FTAG);
4632 }
4633
4634 /*
4635 * This function gets called just prior to running through the compression
4636 * stage of the zio pipeline. If we're an indirect block comprised of only
4637 * holes, then we want this indirect to be compressed away to a hole. In
4638 * order to do that we must zero out any information about the holes that
4639 * this indirect points to before we try to compress it.
4640 */ 4641 static void 4642 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 4643 { 4644 (void) zio, (void) buf; 4645 dmu_buf_impl_t *db = vdb; 4646 dnode_t *dn; 4647 blkptr_t *bp; 4648 unsigned int epbs, i; 4649 4650 ASSERT3U(db->db_level, >, 0); 4651 DB_DNODE_ENTER(db); 4652 dn = DB_DNODE(db); 4653 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 4654 ASSERT3U(epbs, <, 31); 4655 4656 /* Determine if all our children are holes */ 4657 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { 4658 if (!BP_IS_HOLE(bp)) 4659 break; 4660 } 4661 4662 /* 4663 * If all the children are holes, then zero them all out so that 4664 * we may get compressed away. 4665 */ 4666 if (i == 1ULL << epbs) { 4667 /* 4668 * We only found holes. Grab the rwlock to prevent 4669 * anybody from reading the blocks we're about to 4670 * zero out. 4671 */ 4672 rw_enter(&db->db_rwlock, RW_WRITER); 4673 memset(db->db.db_data, 0, db->db.db_size); 4674 rw_exit(&db->db_rwlock); 4675 } 4676 DB_DNODE_EXIT(db); 4677 } 4678 4679 /* 4680 * The SPA will call this callback several times for each zio - once 4681 * for every physical child i/o (zio->io_phys_children times). This 4682 * allows the DMU to monitor the progress of each logical i/o. For example, 4683 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 4684 * block. There may be a long delay before all copies/fragments are completed, 4685 * so this callback allows us to retire dirty space gradually, as the physical 4686 * i/os complete. 4687 */ 4688 static void 4689 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 4690 { 4691 (void) buf; 4692 dmu_buf_impl_t *db = arg; 4693 objset_t *os = db->db_objset; 4694 dsl_pool_t *dp = dmu_objset_pool(os); 4695 dbuf_dirty_record_t *dr; 4696 int delta = 0; 4697 4698 dr = db->db_data_pending; 4699 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 4700 4701 /* 4702 * The callback will be called io_phys_children times. Retire one 4703 * portion of our dirty space each time we are called. Any rounding 4704 * error will be cleaned up by dbuf_write_done(). 4705 */ 4706 delta = dr->dr_accounted / zio->io_phys_children; 4707 dsl_pool_undirty_space(dp, delta, zio->io_txg); 4708 } 4709 4710 static void 4711 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 4712 { 4713 (void) buf; 4714 dmu_buf_impl_t *db = vdb; 4715 blkptr_t *bp_orig = &zio->io_bp_orig; 4716 blkptr_t *bp = db->db_blkptr; 4717 objset_t *os = db->db_objset; 4718 dmu_tx_t *tx = os->os_synctx; 4719 4720 ASSERT0(zio->io_error); 4721 ASSERT(db->db_blkptr == bp); 4722 4723 /* 4724 * For nopwrites and rewrites we ensure that the bp matches our 4725 * original and bypass all the accounting. 
4726 */ 4727 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 4728 ASSERT(BP_EQUAL(bp, bp_orig)); 4729 } else { 4730 dsl_dataset_t *ds = os->os_dsl_dataset; 4731 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 4732 dsl_dataset_block_born(ds, bp, tx); 4733 } 4734 4735 mutex_enter(&db->db_mtx); 4736 4737 DBUF_VERIFY(db); 4738 4739 dbuf_dirty_record_t *dr = db->db_data_pending; 4740 dnode_t *dn = dr->dr_dnode; 4741 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4742 ASSERT(dr->dr_dbuf == db); 4743 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 4744 list_remove(&db->db_dirty_records, dr); 4745 4746 #ifdef ZFS_DEBUG 4747 if (db->db_blkid == DMU_SPILL_BLKID) { 4748 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 4749 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 4750 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 4751 } 4752 #endif 4753 4754 if (db->db_level == 0) { 4755 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 4756 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 4757 if (db->db_state != DB_NOFILL) { 4758 if (dr->dt.dl.dr_data != db->db_buf) 4759 arc_buf_destroy(dr->dt.dl.dr_data, db); 4760 } 4761 } else { 4762 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 4763 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 4764 if (!BP_IS_HOLE(db->db_blkptr)) { 4765 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift - 4766 SPA_BLKPTRSHIFT; 4767 ASSERT3U(db->db_blkid, <=, 4768 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 4769 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 4770 db->db.db_size); 4771 } 4772 mutex_destroy(&dr->dt.di.dr_mtx); 4773 list_destroy(&dr->dt.di.dr_children); 4774 } 4775 4776 cv_broadcast(&db->db_changed); 4777 ASSERT(db->db_dirtycnt > 0); 4778 db->db_dirtycnt -= 1; 4779 db->db_data_pending = NULL; 4780 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 4781 4782 /* 4783 * If we didn't do a physical write in this ZIO and we 4784 * still ended up here, it means that the space of the 4785 * dbuf that we just released (and undirtied) above hasn't 4786 * been marked as undirtied in the pool's accounting. 4787 * 4788 * Thus, we undirty that space in the pool's view of the 4789 * world here. For physical writes this type of update 4790 * happens in dbuf_write_physdone(). 4791 * 4792 * If we did a physical write, cleanup any rounding errors 4793 * that came up due to writing multiple copies of a block 4794 * on disk [see dbuf_write_physdone()]. 
4795 */ 4796 if (zio->io_phys_children == 0) { 4797 dsl_pool_undirty_space(dmu_objset_pool(os), 4798 dr->dr_accounted, zio->io_txg); 4799 } else { 4800 dsl_pool_undirty_space(dmu_objset_pool(os), 4801 dr->dr_accounted % zio->io_phys_children, zio->io_txg); 4802 } 4803 4804 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 4805 } 4806 4807 static void 4808 dbuf_write_nofill_ready(zio_t *zio) 4809 { 4810 dbuf_write_ready(zio, NULL, zio->io_private); 4811 } 4812 4813 static void 4814 dbuf_write_nofill_done(zio_t *zio) 4815 { 4816 dbuf_write_done(zio, NULL, zio->io_private); 4817 } 4818 4819 static void 4820 dbuf_write_override_ready(zio_t *zio) 4821 { 4822 dbuf_dirty_record_t *dr = zio->io_private; 4823 dmu_buf_impl_t *db = dr->dr_dbuf; 4824 4825 dbuf_write_ready(zio, NULL, db); 4826 } 4827 4828 static void 4829 dbuf_write_override_done(zio_t *zio) 4830 { 4831 dbuf_dirty_record_t *dr = zio->io_private; 4832 dmu_buf_impl_t *db = dr->dr_dbuf; 4833 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 4834 4835 mutex_enter(&db->db_mtx); 4836 if (!BP_EQUAL(zio->io_bp, obp)) { 4837 if (!BP_IS_HOLE(obp)) 4838 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 4839 arc_release(dr->dt.dl.dr_data, db); 4840 } 4841 mutex_exit(&db->db_mtx); 4842 4843 dbuf_write_done(zio, NULL, db); 4844 4845 if (zio->io_abd != NULL) 4846 abd_free(zio->io_abd); 4847 } 4848 4849 typedef struct dbuf_remap_impl_callback_arg { 4850 objset_t *drica_os; 4851 uint64_t drica_blk_birth; 4852 dmu_tx_t *drica_tx; 4853 } dbuf_remap_impl_callback_arg_t; 4854 4855 static void 4856 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 4857 void *arg) 4858 { 4859 dbuf_remap_impl_callback_arg_t *drica = arg; 4860 objset_t *os = drica->drica_os; 4861 spa_t *spa = dmu_objset_spa(os); 4862 dmu_tx_t *tx = drica->drica_tx; 4863 4864 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4865 4866 if (os == spa_meta_objset(spa)) { 4867 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 4868 } else { 4869 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 4870 size, drica->drica_blk_birth, tx); 4871 } 4872 } 4873 4874 static void 4875 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) 4876 { 4877 blkptr_t bp_copy = *bp; 4878 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4879 dbuf_remap_impl_callback_arg_t drica; 4880 4881 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4882 4883 drica.drica_os = dn->dn_objset; 4884 drica.drica_blk_birth = bp->blk_birth; 4885 drica.drica_tx = tx; 4886 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 4887 &drica)) { 4888 /* 4889 * If the blkptr being remapped is tracked by a livelist, 4890 * then we need to make sure the livelist reflects the update. 4891 * First, cancel out the old blkptr by appending a 'FREE' 4892 * entry. Next, add an 'ALLOC' to track the new version. This 4893 * way we avoid trying to free an inaccurate blkptr at delete. 4894 * Note that embedded blkptrs are not tracked in livelists. 
4895 */ 4896 if (dn->dn_objset != spa_meta_objset(spa)) { 4897 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset); 4898 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && 4899 bp->blk_birth > ds->ds_dir->dd_origin_txg) { 4900 ASSERT(!BP_IS_EMBEDDED(bp)); 4901 ASSERT(dsl_dir_is_clone(ds->ds_dir)); 4902 ASSERT(spa_feature_is_enabled(spa, 4903 SPA_FEATURE_LIVELIST)); 4904 bplist_append(&ds->ds_dir->dd_pending_frees, 4905 bp); 4906 bplist_append(&ds->ds_dir->dd_pending_allocs, 4907 &bp_copy); 4908 } 4909 } 4910 4911 /* 4912 * The db_rwlock prevents dbuf_read_impl() from 4913 * dereferencing the BP while we are changing it. To 4914 * avoid lock contention, only grab it when we are actually 4915 * changing the BP. 4916 */ 4917 if (rw != NULL) 4918 rw_enter(rw, RW_WRITER); 4919 *bp = bp_copy; 4920 if (rw != NULL) 4921 rw_exit(rw); 4922 } 4923 } 4924 4925 /* 4926 * Remap any existing BP's to concrete vdevs, if possible. 4927 */ 4928 static void 4929 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 4930 { 4931 spa_t *spa = dmu_objset_spa(db->db_objset); 4932 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4933 4934 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 4935 return; 4936 4937 if (db->db_level > 0) { 4938 blkptr_t *bp = db->db.db_data; 4939 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 4940 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx); 4941 } 4942 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 4943 dnode_phys_t *dnp = db->db.db_data; 4944 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 4945 DMU_OT_DNODE); 4946 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; 4947 i += dnp[i].dn_extra_slots + 1) { 4948 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 4949 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL : 4950 &dn->dn_dbuf->db_rwlock); 4951 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock, 4952 tx); 4953 } 4954 } 4955 } 4956 } 4957 4958 4959 /* Issue I/O to commit a dirty buffer to disk. */ 4960 static void 4961 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 4962 { 4963 dmu_buf_impl_t *db = dr->dr_dbuf; 4964 dnode_t *dn = dr->dr_dnode; 4965 objset_t *os; 4966 dmu_buf_impl_t *parent = db->db_parent; 4967 uint64_t txg = tx->tx_txg; 4968 zbookmark_phys_t zb; 4969 zio_prop_t zp; 4970 zio_t *pio; /* parent I/O */ 4971 int wp_flag = 0; 4972 4973 ASSERT(dmu_tx_is_syncing(tx)); 4974 4975 os = dn->dn_objset; 4976 4977 if (db->db_state != DB_NOFILL) { 4978 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 4979 /* 4980 * Private object buffers are released here rather 4981 * than in dbuf_dirty() since they are only modified 4982 * in the syncing context and we don't want the 4983 * overhead of making multiple copies of the data. 4984 */ 4985 if (BP_IS_HOLE(db->db_blkptr)) { 4986 arc_buf_thaw(data); 4987 } else { 4988 dbuf_release_bp(db); 4989 } 4990 dbuf_remap(dn, db, tx); 4991 } 4992 } 4993 4994 if (parent != dn->dn_dbuf) { 4995 /* Our parent is an indirect block. */ 4996 /* We have a dirty parent that has been scheduled for write. */ 4997 ASSERT(parent && parent->db_data_pending); 4998 /* Our parent's buffer is one level closer to the dnode. */ 4999 ASSERT(db->db_level == parent->db_level-1); 5000 /* 5001 * We're about to modify our parent's db_data by modifying 5002 * our block pointer, so the parent must be released. 5003 */ 5004 ASSERT(arc_released(parent->db_buf)); 5005 pio = parent->db_data_pending->dr_zio; 5006 } else { 5007 /* Our parent is the dnode itself. 
*/ 5008 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 5009 db->db_blkid != DMU_SPILL_BLKID) || 5010 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 5011 if (db->db_blkid != DMU_SPILL_BLKID) 5012 ASSERT3P(db->db_blkptr, ==, 5013 &dn->dn_phys->dn_blkptr[db->db_blkid]); 5014 pio = dn->dn_zio; 5015 } 5016 5017 ASSERT(db->db_level == 0 || data == db->db_buf); 5018 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 5019 ASSERT(pio); 5020 5021 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 5022 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 5023 db->db.db_object, db->db_level, db->db_blkid); 5024 5025 if (db->db_blkid == DMU_SPILL_BLKID) 5026 wp_flag = WP_SPILL; 5027 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 5028 5029 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 5030 5031 /* 5032 * We copy the blkptr now (rather than when we instantiate the dirty 5033 * record), because its value can change between open context and 5034 * syncing context. We do not need to hold dn_struct_rwlock to read 5035 * db_blkptr because we are in syncing context. 5036 */ 5037 dr->dr_bp_copy = *db->db_blkptr; 5038 5039 if (db->db_level == 0 && 5040 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 5041 /* 5042 * The BP for this block has been provided by open context 5043 * (by dmu_sync() or dmu_buf_write_embedded()). 5044 */ 5045 abd_t *contents = (data != NULL) ? 5046 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 5047 5048 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy, 5049 contents, db->db.db_size, db->db.db_size, &zp, 5050 dbuf_write_override_ready, NULL, NULL, 5051 dbuf_write_override_done, 5052 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 5053 mutex_enter(&db->db_mtx); 5054 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 5055 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 5056 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 5057 mutex_exit(&db->db_mtx); 5058 } else if (db->db_state == DB_NOFILL) { 5059 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 5060 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 5061 dr->dr_zio = zio_write(pio, os->os_spa, txg, 5062 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 5063 dbuf_write_nofill_ready, NULL, NULL, 5064 dbuf_write_nofill_done, db, 5065 ZIO_PRIORITY_ASYNC_WRITE, 5066 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 5067 } else { 5068 ASSERT(arc_released(data)); 5069 5070 /* 5071 * For indirect blocks, we want to setup the children 5072 * ready callback so that we can properly handle an indirect 5073 * block that only contains holes. 
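 * (That zeroing is done by dbuf_write_children_ready(), installed below
 * as the children-ready callback.)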
5074 */ 5075 arc_write_done_func_t *children_ready_cb = NULL; 5076 if (db->db_level != 0) 5077 children_ready_cb = dbuf_write_children_ready; 5078 5079 dr->dr_zio = arc_write(pio, os->os_spa, txg, 5080 &dr->dr_bp_copy, data, dbuf_is_l2cacheable(db), 5081 &zp, dbuf_write_ready, 5082 children_ready_cb, dbuf_write_physdone, 5083 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, 5084 ZIO_FLAG_MUSTSUCCEED, &zb); 5085 } 5086 } 5087 5088 EXPORT_SYMBOL(dbuf_find); 5089 EXPORT_SYMBOL(dbuf_is_metadata); 5090 EXPORT_SYMBOL(dbuf_destroy); 5091 EXPORT_SYMBOL(dbuf_loan_arcbuf); 5092 EXPORT_SYMBOL(dbuf_whichblock); 5093 EXPORT_SYMBOL(dbuf_read); 5094 EXPORT_SYMBOL(dbuf_unoverride); 5095 EXPORT_SYMBOL(dbuf_free_range); 5096 EXPORT_SYMBOL(dbuf_new_size); 5097 EXPORT_SYMBOL(dbuf_release_bp); 5098 EXPORT_SYMBOL(dbuf_dirty); 5099 EXPORT_SYMBOL(dmu_buf_set_crypt_params); 5100 EXPORT_SYMBOL(dmu_buf_will_dirty); 5101 EXPORT_SYMBOL(dmu_buf_is_dirty); 5102 EXPORT_SYMBOL(dmu_buf_will_not_fill); 5103 EXPORT_SYMBOL(dmu_buf_will_fill); 5104 EXPORT_SYMBOL(dmu_buf_fill_done); 5105 EXPORT_SYMBOL(dmu_buf_rele); 5106 EXPORT_SYMBOL(dbuf_assign_arcbuf); 5107 EXPORT_SYMBOL(dbuf_prefetch); 5108 EXPORT_SYMBOL(dbuf_hold_impl); 5109 EXPORT_SYMBOL(dbuf_hold); 5110 EXPORT_SYMBOL(dbuf_hold_level); 5111 EXPORT_SYMBOL(dbuf_create_bonus); 5112 EXPORT_SYMBOL(dbuf_spill_set_blksz); 5113 EXPORT_SYMBOL(dbuf_rm_spill); 5114 EXPORT_SYMBOL(dbuf_add_ref); 5115 EXPORT_SYMBOL(dbuf_rele); 5116 EXPORT_SYMBOL(dbuf_rele_and_unlock); 5117 EXPORT_SYMBOL(dbuf_refcount); 5118 EXPORT_SYMBOL(dbuf_sync_list); 5119 EXPORT_SYMBOL(dmu_buf_set_user); 5120 EXPORT_SYMBOL(dmu_buf_set_user_ie); 5121 EXPORT_SYMBOL(dmu_buf_get_user); 5122 EXPORT_SYMBOL(dmu_buf_get_blkptr); 5123 5124 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW, 5125 "Maximum size in bytes of the dbuf cache."); 5126 5127 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW, 5128 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction."); 5129 5130 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW, 5131 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops."); 5132 5133 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW, 5134 "Maximum size in bytes of dbuf metadata cache."); 5135 5136 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW, 5137 "Set size of dbuf cache to log2 fraction of arc size."); 5138 5139 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW, 5140 "Set size of dbuf metadata cache to log2 fraction of arc size."); 5141 5142 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD, 5143 "Set size of dbuf cache mutex array as log2 shift."); 5144
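
/*
 * Tuning sketch (illustrative only, not part of the module source): on
 * Linux builds the parameters above are typically exposed under
 * /sys/module/zfs/parameters/, so the dbuf cache can be inspected and
 * resized at runtime, e.g.:
 *
 *	cat /sys/module/zfs/parameters/dbuf_cache_max_bytes
 *	echo 134217728 > /sys/module/zfs/parameters/dbuf_cache_max_bytes
 *
 * By default the dbuf cache size is derived from the ARC size via
 * dbuf_cache_shift, with dbuf_cache_max_bytes acting as an upper bound.
 */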