/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>

static kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Number of entries in the hash table dbuf and mutex arrays.
	 */
	kstat_named_t hash_table_count;
	kstat_named_t hash_mutex_count;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
	{ "hash_hits",				KSTAT_DATA_UINT64 },
	{ "hash_misses",			KSTAT_DATA_UINT64 },
	{ "hash_collisions",			KSTAT_DATA_UINT64 },
	{ "hash_elements",			KSTAT_DATA_UINT64 },
	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
	{ "hash_chains",			KSTAT_DATA_UINT64 },
	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
	{ "hash_table_count",			KSTAT_DATA_UINT64 },
	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
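/*
 * Descriptive note: DBUF_STAT_MAX below is a lock-free maximum update.
 * It re-reads the stored maximum and retries the compare-and-swap until
 * either the observed value is no longer greater or the CAS installs it,
 * so a racing writer can never move the maximum backwards.
 */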
#define	DBUF_STAT_MAX(stat, v) {					\
	uint64_t _m;							\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;						\
}

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;

/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                      stop        signal     evict
 *                                      evicting    eviction   directly
 *                                      thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
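/*
 * Illustrative example, assuming the default tunables: with a cache
 * target of 100 MiB and dbuf_cache_hiwater_pct/dbuf_cache_lowater_pct
 * both at 10, the marks work out to
 *
 *	hiwater = 100 MiB + (100 MiB * 10) / 100 = 110 MiB
 *	lowater = 100 MiB - (100 MiB * 10) / 100 = 90 MiB
 *
 * so a caller that grows the cache past 100 MiB signals the eviction
 * thread, and past 110 MiB it calls dbuf_evict_one() itself until the
 * cache is no longer above the high water mark.
 */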
/*
 * The percentage above and below the maximum cache size.
 */
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;

static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	dmu_buf_impl_t *db = vdb;
	memset(db, 0, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

static void
dbuf_dest(void *vdb, void *unused)
{
	(void) unused;
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
    uint64_t *hash_out)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	if (hash_out != NULL)
		*hash_out = hv;
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}
/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t idx;
	dmu_buf_impl_t *dbf, **dbp;

	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
	    db->db_blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be metadata.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
	vdev_t *vd = NULL;
	zfs_cache_type_t cache = db->db_objset->os_secondary_cache;
	blkptr_t *bp = db->db_blkptr;

	if (bp != NULL && !BP_IS_HOLE(bp)) {
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (cache == ZFS_CACHE_ALL ||
		    (dbuf_is_metadata(db) && cache == ZFS_CACHE_METADATA)) {
			if (vd == NULL)
				return (B_TRUE);

			if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
			    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
			    l2arc_exclude_special == 0)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	vdev_t *vd = NULL;
	zfs_cache_type_t cache = dn->dn_objset->os_secondary_cache;

	if (bp != NULL && !BP_IS_HOLE(bp)) {
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (cache == ZFS_CACHE_ALL || ((level > 0 ||
		    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)) &&
		    cache == ZFS_CACHE_METADATA)) {
			if (vd == NULL)
				return (B_TRUE);

			if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
			    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
			    l2arc_exclude_special == 0)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublists' usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}
/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}
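/*
 * Illustrative example with the default shifts: for an ARC target of
 * 4 GiB, dbuf_cache_shift = 5 gives a dbuf cache target of
 * 4 GiB >> 5 = 128 MiB, and dbuf_metadata_cache_shift = 6 gives a
 * metadata cache target of 4 GiB >> 6 = 64 MiB, unless capped by
 * dbuf_cache_max_bytes or dbuf_metadata_cache_max_bytes respectively.
 */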
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the low water mark. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hmsize, hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 64K (128KB per GB with 8-byte pointers).
	 */
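	/*
	 * For example (illustrative), on a 64 GiB system the loop below
	 * stops at hsize = 2^20 buckets, since 2^20 * 8K = 8 GiB is no
	 * longer less than 64 GiB / 8; the table itself then occupies
	 * 2^20 * 8 bytes = 8 MiB.
	 */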
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets. A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
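	/*
	 * For example (illustrative), a table of 2^20 buckets gets
	 * MAX(2^20 >> 7, 2^13) = 2^13 mutexes, i.e. one mutex per 128
	 * buckets; only tables with more than 2^20 buckets grow the
	 * mutex array beyond the 8192 minimum.
	 */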
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}
void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;

	dbuf_stats_destroy();

	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
		mutex_destroy(&h->hash_mutexes[i]);

	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
	    sizeof (kmutex_t));

	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}
	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		memcpy(abuf->b_data, db->db.db_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
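		/*
		 * Worked example (illustrative): with a 128K record size
		 * (datablkshift = 17) and indblkshift = 17, each indirect
		 * block holds 2^(17 - 7) = 1024 block pointers, so the
		 * level 1 blkid for byte offset 2^30 is
		 * 2^30 >> (17 + 1 * (17 - 7)) = 2^30 >> 27 = 8.
		 */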
1291 * 1292 * Thus, the level n blkid is: offset / 1293 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT)))) 1294 * = offset / 2^(datablkshift + level * 1295 * (indblkshift - SPA_BLKPTRSHIFT)) 1296 * = offset >> (datablkshift + level * 1297 * (indblkshift - SPA_BLKPTRSHIFT)) 1298 */ 1299 1300 const unsigned exp = dn->dn_datablkshift + 1301 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT); 1302 1303 if (exp >= 8 * sizeof (offset)) { 1304 /* This only happens on the highest indirection level */ 1305 ASSERT3U(level, ==, dn->dn_nlevels - 1); 1306 return (0); 1307 } 1308 1309 ASSERT3U(exp, <, 8 * sizeof (offset)); 1310 1311 return (offset >> exp); 1312 } else { 1313 ASSERT3U(offset, <, dn->dn_datablksz); 1314 return (0); 1315 } 1316 } 1317 1318 /* 1319 * This function is used to lock the parent of the provided dbuf. This should be 1320 * used when modifying or reading db_blkptr. 1321 */ 1322 db_lock_type_t 1323 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag) 1324 { 1325 enum db_lock_type ret = DLT_NONE; 1326 if (db->db_parent != NULL) { 1327 rw_enter(&db->db_parent->db_rwlock, rw); 1328 ret = DLT_PARENT; 1329 } else if (dmu_objset_ds(db->db_objset) != NULL) { 1330 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw, 1331 tag); 1332 ret = DLT_OBJSET; 1333 } 1334 /* 1335 * We only return a DLT_NONE lock when it's the top-most indirect block 1336 * of the meta-dnode of the MOS. 1337 */ 1338 return (ret); 1339 } 1340 1341 /* 1342 * We need to pass the lock type in because it's possible that the block will 1343 * move from being the topmost indirect block in a dnode (and thus, have no 1344 * parent) to not the top-most via an indirection increase. This would cause a 1345 * panic if we didn't pass the lock type in. 1346 */ 1347 void 1348 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag) 1349 { 1350 if (type == DLT_PARENT) 1351 rw_exit(&db->db_parent->db_rwlock); 1352 else if (type == DLT_OBJSET) 1353 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag); 1354 } 1355 1356 static void 1357 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 1358 arc_buf_t *buf, void *vdb) 1359 { 1360 (void) zb, (void) bp; 1361 dmu_buf_impl_t *db = vdb; 1362 1363 mutex_enter(&db->db_mtx); 1364 ASSERT3U(db->db_state, ==, DB_READ); 1365 /* 1366 * All reads are synchronous, so we must have a hold on the dbuf 1367 */ 1368 ASSERT(zfs_refcount_count(&db->db_holds) > 0); 1369 ASSERT(db->db_buf == NULL); 1370 ASSERT(db->db.db_data == NULL); 1371 if (buf == NULL) { 1372 /* i/o error */ 1373 ASSERT(zio == NULL || zio->io_error != 0); 1374 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1375 ASSERT3P(db->db_buf, ==, NULL); 1376 db->db_state = DB_UNCACHED; 1377 DTRACE_SET_STATE(db, "i/o error"); 1378 } else if (db->db_level == 0 && db->db_freed_in_flight) { 1379 /* freed in flight */ 1380 ASSERT(zio == NULL || zio->io_error == 0); 1381 arc_release(buf, db); 1382 memset(buf->b_data, 0, db->db.db_size); 1383 arc_buf_freeze(buf); 1384 db->db_freed_in_flight = FALSE; 1385 dbuf_set_data(db, buf); 1386 db->db_state = DB_CACHED; 1387 DTRACE_SET_STATE(db, "freed in flight"); 1388 } else { 1389 /* success */ 1390 ASSERT(zio == NULL || zio->io_error == 0); 1391 dbuf_set_data(db, buf); 1392 db->db_state = DB_CACHED; 1393 DTRACE_SET_STATE(db, "successful read"); 1394 } 1395 cv_broadcast(&db->db_changed); 1396 dbuf_rele_and_unlock(db, NULL, B_FALSE); 1397 } 1398 1399 /* 1400 * Shortcut for performing reads on bonus dbufs. 
Returns 1401 * an error if we fail to verify the dnode associated with 1402 * a decrypted block. Otherwise success. 1403 */ 1404 static int 1405 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags) 1406 { 1407 int bonuslen, max_bonuslen, err; 1408 1409 err = dbuf_read_verify_dnode_crypt(db, flags); 1410 if (err) 1411 return (err); 1412 1413 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 1414 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1415 ASSERT(MUTEX_HELD(&db->db_mtx)); 1416 ASSERT(DB_DNODE_HELD(db)); 1417 ASSERT3U(bonuslen, <=, db->db.db_size); 1418 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP); 1419 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 1420 if (bonuslen < max_bonuslen) 1421 memset(db->db.db_data, 0, max_bonuslen); 1422 if (bonuslen) 1423 memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen); 1424 db->db_state = DB_CACHED; 1425 DTRACE_SET_STATE(db, "bonus buffer filled"); 1426 return (0); 1427 } 1428 1429 static void 1430 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn) 1431 { 1432 blkptr_t *bps = db->db.db_data; 1433 uint32_t indbs = 1ULL << dn->dn_indblkshift; 1434 int n_bps = indbs >> SPA_BLKPTRSHIFT; 1435 1436 for (int i = 0; i < n_bps; i++) { 1437 blkptr_t *bp = &bps[i]; 1438 1439 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs); 1440 BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ? 1441 dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr)); 1442 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 1443 BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1); 1444 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 1445 } 1446 } 1447 1448 /* 1449 * Handle reads on dbufs that are holes, if necessary. This function 1450 * requires that the dbuf's mutex is held. Returns success (0) if action 1451 * was taken, ENOENT if no action was taken. 1452 */ 1453 static int 1454 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn) 1455 { 1456 ASSERT(MUTEX_HELD(&db->db_mtx)); 1457 1458 int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr); 1459 /* 1460 * For level 0 blocks only, if the above check fails: 1461 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 1462 * processes the delete record and clears the bp while we are waiting 1463 * for the dn_mtx (resulting in a "no" from block_freed). 1464 */ 1465 if (!is_hole && db->db_level == 0) { 1466 is_hole = dnode_block_freed(dn, db->db_blkid) || 1467 BP_IS_HOLE(db->db_blkptr); 1468 } 1469 1470 if (is_hole) { 1471 dbuf_set_data(db, dbuf_alloc_arcbuf(db)); 1472 memset(db->db.db_data, 0, db->db.db_size); 1473 1474 if (db->db_blkptr != NULL && db->db_level > 0 && 1475 BP_IS_HOLE(db->db_blkptr) && 1476 db->db_blkptr->blk_birth != 0) { 1477 dbuf_handle_indirect_hole(db, dn); 1478 } 1479 db->db_state = DB_CACHED; 1480 DTRACE_SET_STATE(db, "hole read satisfied"); 1481 return (0); 1482 } 1483 return (ENOENT); 1484 } 1485 1486 /* 1487 * This function ensures that, when doing a decrypting read of a block, 1488 * we make sure we have decrypted the dnode associated with it. We must do 1489 * this so that we ensure we are fully authenticating the checksum-of-MACs 1490 * tree from the root of the objset down to this block. Indirect blocks are 1491 * always verified against their secure checksum-of-MACs assuming that the 1492 * dnode containing them is correct. Now that we are doing a decrypting read, 1493 * we can be sure that the key is loaded and verify that assumption. 
This is 1494 * especially important considering that we always read encrypted dnode 1495 * blocks as raw data (without verifying their MACs) to start, and 1496 * decrypt / authenticate them when we need to read an encrypted bonus buffer. 1497 */ 1498 static int 1499 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags) 1500 { 1501 int err = 0; 1502 objset_t *os = db->db_objset; 1503 arc_buf_t *dnode_abuf; 1504 dnode_t *dn; 1505 zbookmark_phys_t zb; 1506 1507 ASSERT(MUTEX_HELD(&db->db_mtx)); 1508 1509 if ((flags & DB_RF_NO_DECRYPT) != 0 || 1510 !os->os_encrypted || os->os_raw_receive) 1511 return (0); 1512 1513 DB_DNODE_ENTER(db); 1514 dn = DB_DNODE(db); 1515 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL; 1516 1517 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) { 1518 DB_DNODE_EXIT(db); 1519 return (0); 1520 } 1521 1522 SET_BOOKMARK(&zb, dmu_objset_id(os), 1523 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid); 1524 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE); 1525 1526 /* 1527 * An error code of EACCES tells us that the key is still not 1528 * available. This is ok if we are only reading authenticated 1529 * (and therefore non-encrypted) blocks. 1530 */ 1531 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID && 1532 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) || 1533 (db->db_blkid == DMU_BONUS_BLKID && 1534 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)))) 1535 err = 0; 1536 1537 DB_DNODE_EXIT(db); 1538 1539 return (err); 1540 } 1541 1542 /* 1543 * Drops db_mtx and the parent lock specified by dblt and tag before 1544 * returning. 1545 */ 1546 static int 1547 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags, 1548 db_lock_type_t dblt, const void *tag) 1549 { 1550 dnode_t *dn; 1551 zbookmark_phys_t zb; 1552 uint32_t aflags = ARC_FLAG_NOWAIT; 1553 int err, zio_flags; 1554 1555 DB_DNODE_ENTER(db); 1556 dn = DB_DNODE(db); 1557 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1558 ASSERT(MUTEX_HELD(&db->db_mtx)); 1559 ASSERT(db->db_state == DB_UNCACHED); 1560 ASSERT(db->db_buf == NULL); 1561 ASSERT(db->db_parent == NULL || 1562 RW_LOCK_HELD(&db->db_parent->db_rwlock)); 1563 1564 if (db->db_blkid == DMU_BONUS_BLKID) { 1565 err = dbuf_read_bonus(db, dn, flags); 1566 goto early_unlock; 1567 } 1568 1569 err = dbuf_read_hole(db, dn); 1570 if (err == 0) 1571 goto early_unlock; 1572 1573 /* 1574 * Any attempt to read a redacted block should result in an error. This 1575 * will never happen under normal conditions, but can be useful for 1576 * debugging purposes. 1577 */ 1578 if (BP_IS_REDACTED(db->db_blkptr)) { 1579 ASSERT(dsl_dataset_feature_is_active( 1580 db->db_objset->os_dsl_dataset, 1581 SPA_FEATURE_REDACTED_DATASETS)); 1582 err = SET_ERROR(EIO); 1583 goto early_unlock; 1584 } 1585 1586 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1587 db->db.db_object, db->db_level, db->db_blkid); 1588 1589 /* 1590 * All bps of an encrypted os should have the encryption bit set. 1591 * If this is not true it indicates tampering and we report an error. 
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0)
		goto early_unlock;

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	DTRACE_SET_STATE(db, "read issued");
	mutex_exit(&db->db_mtx);

	if (!DBUF_IS_CACHEABLE(db))
		aflags |= ARC_FLAG_UNCACHED;
	else if (dbuf_is_l2cacheable(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we need to
	 * do this now so that we can release the parent's rwlock. We have to
	 * do that now so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	blkptr_t bp = *db->db_blkptr;
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, &bp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
early_unlock:
	DB_DNODE_EXIT(db);
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		dnode_t *dn = DB_DNODE(db);
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);
		uint8_t complevel = arc_get_complevel(db->db_buf);

		if (arc_is_encrypted(db->db_buf)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(db->db_buf, &byteorder, salt,
			    iv, mac);
			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
			    compress_type, complevel);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type,
			    complevel);
		} else {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		}
		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;

	mutex_enter(&db->db_mtx);
	if (flags & DB_RF_PARTIAL_FIRST)
		db->db_partial_read = B_TRUE;
	else if (!(flags & DB_RF_PARTIAL_MORE))
		db->db_partial_read = B_FALSE;
	if (db->db_state == DB_CACHED) {
		/*
		 * Ensure that this block's dnode has been decrypted if
		 * the caller has requested decrypted data.
		 */
		err = dbuf_read_verify_dnode_crypt(db, flags);

		/*
		 * If the arc buf is compressed or encrypted and the caller
		 * requested uncompressed data, we need to untransform it
		 * before returning. We also call arc_untransform() on any
		 * unauthenticated blocks, which will verify their MAC if
		 * the key is now available.
		 */
1761 */ 1762 if (err == 0 && db->db_buf != NULL && 1763 (flags & DB_RF_NO_DECRYPT) == 0 && 1764 (arc_is_encrypted(db->db_buf) || 1765 arc_is_unauthenticated(db->db_buf) || 1766 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { 1767 spa_t *spa = dn->dn_objset->os_spa; 1768 zbookmark_phys_t zb; 1769 1770 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1771 db->db.db_object, db->db_level, db->db_blkid); 1772 dbuf_fix_old_data(db, spa_syncing_txg(spa)); 1773 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); 1774 dbuf_set_data(db, db->db_buf); 1775 } 1776 mutex_exit(&db->db_mtx); 1777 if (err == 0 && prefetch) { 1778 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1779 B_FALSE, flags & DB_RF_HAVESTRUCT); 1780 } 1781 DB_DNODE_EXIT(db); 1782 DBUF_STAT_BUMP(hash_hits); 1783 } else if (db->db_state == DB_UNCACHED) { 1784 boolean_t need_wait = B_FALSE; 1785 1786 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 1787 1788 if (zio == NULL && 1789 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1790 spa_t *spa = dn->dn_objset->os_spa; 1791 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1792 need_wait = B_TRUE; 1793 } 1794 err = dbuf_read_impl(db, zio, flags, dblt, FTAG); 1795 /* 1796 * dbuf_read_impl has dropped db_mtx and our parent's rwlock 1797 * for us 1798 */ 1799 if (!err && prefetch) { 1800 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1801 db->db_state != DB_CACHED, 1802 flags & DB_RF_HAVESTRUCT); 1803 } 1804 1805 DB_DNODE_EXIT(db); 1806 DBUF_STAT_BUMP(hash_misses); 1807 1808 /* 1809 * If we created a zio_root we must execute it to avoid 1810 * leaking it, even if it isn't attached to any work due 1811 * to an error in dbuf_read_impl(). 1812 */ 1813 if (need_wait) { 1814 if (err == 0) 1815 err = zio_wait(zio); 1816 else 1817 VERIFY0(zio_wait(zio)); 1818 } 1819 } else { 1820 /* 1821 * Another reader came in while the dbuf was in flight 1822 * between UNCACHED and CACHED. Either a writer will finish 1823 * writing the buffer (sending the dbuf to CACHED) or the 1824 * first reader's request will reach the read_done callback 1825 * and send the dbuf to CACHED. Otherwise, a failure 1826 * occurred and the dbuf went to UNCACHED. 1827 */ 1828 mutex_exit(&db->db_mtx); 1829 if (prefetch) { 1830 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1831 B_TRUE, flags & DB_RF_HAVESTRUCT); 1832 } 1833 DB_DNODE_EXIT(db); 1834 DBUF_STAT_BUMP(hash_misses); 1835 1836 /* Skip the wait per the caller's request. 
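 * Callers passing DB_RF_NEVERWAIT would rather proceed than block on
 * db_changed waiting for the in-flight read or fill to finish; they
 * accept a zero return even though the data may not be resident yet.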
*/
1837 if ((flags & DB_RF_NEVERWAIT) == 0) {
1838 mutex_enter(&db->db_mtx);
1839 while (db->db_state == DB_READ ||
1840 db->db_state == DB_FILL) {
1841 ASSERT(db->db_state == DB_READ ||
1842 (flags & DB_RF_HAVESTRUCT) == 0);
1843 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1844 db, zio_t *, zio);
1845 cv_wait(&db->db_changed, &db->db_mtx);
1846 }
1847 if (db->db_state == DB_UNCACHED)
1848 err = SET_ERROR(EIO);
1849 mutex_exit(&db->db_mtx);
1850 }
1851 }
1852
1853 return (err);
1854 }
1855
1856 static void
1857 dbuf_noread(dmu_buf_impl_t *db)
1858 {
1859 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1860 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1861 mutex_enter(&db->db_mtx);
1862 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1863 cv_wait(&db->db_changed, &db->db_mtx);
1864 if (db->db_state == DB_UNCACHED) {
1865 ASSERT(db->db_buf == NULL);
1866 ASSERT(db->db.db_data == NULL);
1867 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1868 db->db_state = DB_FILL;
1869 DTRACE_SET_STATE(db, "assigning filled buffer");
1870 } else if (db->db_state == DB_NOFILL) {
1871 dbuf_clear_data(db);
1872 } else {
1873 ASSERT3U(db->db_state, ==, DB_CACHED);
1874 }
1875 mutex_exit(&db->db_mtx);
1876 }
1877
1878 void
1879 dbuf_unoverride(dbuf_dirty_record_t *dr)
1880 {
1881 dmu_buf_impl_t *db = dr->dr_dbuf;
1882 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1883 uint64_t txg = dr->dr_txg;
1884
1885 ASSERT(MUTEX_HELD(&db->db_mtx));
1886 /*
1887 * This assert is valid because dmu_sync() expects to be called by
1888 * a zilog's get_data while holding a range lock. This call only
1889 * comes from dbuf_dirty() callers who must also hold a range lock.
1890 */
1891 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1892 ASSERT(db->db_level == 0);
1893
1894 if (db->db_blkid == DMU_BONUS_BLKID ||
1895 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1896 return;
1897
1898 ASSERT(db->db_data_pending != dr);
1899
1900 /* free this block */
1901 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1902 zio_free(db->db_objset->os_spa, txg, bp);
1903
1904 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1905 dr->dt.dl.dr_nopwrite = B_FALSE;
1906 dr->dt.dl.dr_has_raw_params = B_FALSE;
1907
1908 /*
1909 * Release the already-written buffer, so we leave it in
1910 * a consistent dirty state. Note that all callers are
1911 * modifying the buffer, so they will immediately do
1912 * another (redundant) arc_release(). Therefore, leave
1913 * the buf thawed to save the effort of freezing &
1914 * immediately re-thawing it.
1915 */
1916 arc_release(dr->dt.dl.dr_data, db);
1917 }
1918
1919 /*
1920 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1921 * data blocks in the free range, so that any future readers will find
1922 * empty blocks.
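 *
 * This is normally reached from the dnode layer; a sketch of the
 * call chain for punching a hole (offsets illustrative):
 *
 *	dmu_free_range(os, object, off, len, tx)
 *	  -> dnode_free_range(dn, off, len, tx)
 *	       -> dbuf_free_range(dn, first_blkid, last_blkid, tx)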
1923 */
1924 void
1925 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1926 dmu_tx_t *tx)
1927 {
1928 dmu_buf_impl_t *db_search;
1929 dmu_buf_impl_t *db, *db_next;
1930 uint64_t txg = tx->tx_txg;
1931 avl_index_t where;
1932 dbuf_dirty_record_t *dr;
1933
1934 if (end_blkid > dn->dn_maxblkid &&
1935 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1936 end_blkid = dn->dn_maxblkid;
1937 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1938 (u_longlong_t)end_blkid);
1939
1940 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1941 db_search->db_level = 0;
1942 db_search->db_blkid = start_blkid;
1943 db_search->db_state = DB_SEARCH;
1944
1945 mutex_enter(&dn->dn_dbufs_mtx);
1946 db = avl_find(&dn->dn_dbufs, db_search, &where);
1947 ASSERT3P(db, ==, NULL);
1948
1949 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1950
1951 for (; db != NULL; db = db_next) {
1952 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1953 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1954
1955 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1956 break;
1957 }
1958 ASSERT3U(db->db_blkid, >=, start_blkid);
1959
1960 /* found a level 0 buffer in the range */
1961 mutex_enter(&db->db_mtx);
1962 if (dbuf_undirty(db, tx)) {
1963 /* mutex has been dropped and dbuf destroyed */
1964 continue;
1965 }
1966
1967 if (db->db_state == DB_UNCACHED ||
1968 db->db_state == DB_NOFILL ||
1969 db->db_state == DB_EVICTING) {
1970 ASSERT(db->db.db_data == NULL);
1971 mutex_exit(&db->db_mtx);
1972 continue;
1973 }
1974 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1975 /* will be handled in dbuf_read_done or dbuf_rele */
1976 db->db_freed_in_flight = TRUE;
1977 mutex_exit(&db->db_mtx);
1978 continue;
1979 }
1980 if (zfs_refcount_count(&db->db_holds) == 0) {
1981 ASSERT(db->db_buf);
1982 dbuf_destroy(db);
1983 continue;
1984 }
1985 /* The dbuf is referenced */
1986
1987 dr = list_head(&db->db_dirty_records);
1988 if (dr != NULL) {
1989 if (dr->dr_txg == txg) {
1990 /*
1991 * This buffer is "in-use", re-adjust the file
1992 * size to reflect that this buffer may
1993 * contain new data when we sync.
1994 */
1995 if (db->db_blkid != DMU_SPILL_BLKID &&
1996 db->db_blkid > dn->dn_maxblkid)
1997 dn->dn_maxblkid = db->db_blkid;
1998 dbuf_unoverride(dr);
1999 } else {
2000 /*
2001 * This dbuf is not dirty in the open context.
2002 * Either uncache it (if it's not referenced in
2003 * the open context) or reset its contents to
2004 * empty.
2005 */
2006 dbuf_fix_old_data(db, txg);
2007 }
2008 }
2009 /* clear the contents if it's cached */
2010 if (db->db_state == DB_CACHED) {
2011 ASSERT(db->db.db_data != NULL);
2012 arc_release(db->db_buf, db);
2013 rw_enter(&db->db_rwlock, RW_WRITER);
2014 memset(db->db.db_data, 0, db->db.db_size);
2015 rw_exit(&db->db_rwlock);
2016 arc_buf_freeze(db->db_buf);
2017 }
2018
2019 mutex_exit(&db->db_mtx);
2020 }
2021
2022 mutex_exit(&dn->dn_dbufs_mtx);
2023 kmem_free(db_search, sizeof (dmu_buf_impl_t));
2024 }
2025
2026 void
2027 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2028 {
2029 arc_buf_t *buf, *old_buf;
2030 dbuf_dirty_record_t *dr;
2031 int osize = db->db.db_size;
2032 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2033 dnode_t *dn;
2034
2035 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2036
2037 DB_DNODE_ENTER(db);
2038 dn = DB_DNODE(db);
2039
2040 /*
2041 * XXX we should be doing a dbuf_read, checking the return
2042 * value and returning that up to our callers
2043 */
2044 dmu_buf_will_dirty(&db->db, tx);
2045
2046 /* create the data buffer for the new block */
2047 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2048
2049 /* copy old block data to the new block */
2050 old_buf = db->db_buf;
2051 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2052 /* zero the remainder */
2053 if (size > osize)
2054 memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2055
2056 mutex_enter(&db->db_mtx);
2057 dbuf_set_data(db, buf);
2058 arc_buf_destroy(old_buf, db);
2059 db->db.db_size = size;
2060
2061 dr = list_head(&db->db_dirty_records);
2062 /* dirty record added by dmu_buf_will_dirty() */
2063 VERIFY(dr != NULL);
2064 if (db->db_level == 0)
2065 dr->dt.dl.dr_data = buf;
2066 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2067 ASSERT3U(dr->dr_accounted, ==, osize);
2068 dr->dr_accounted = size;
2069 mutex_exit(&db->db_mtx);
2070
2071 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2072 DB_DNODE_EXIT(db);
2073 }
2074
2075 void
2076 dbuf_release_bp(dmu_buf_impl_t *db)
2077 {
2078 objset_t *os __maybe_unused = db->db_objset;
2079
2080 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2081 ASSERT(arc_released(os->os_phys_buf) ||
2082 list_link_active(&os->os_dsl_dataset->ds_synced_link));
2083 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2084
2085 (void) arc_release(db->db_buf, db);
2086 }
2087
2088 /*
2089 * We already have a dirty record for this TXG, and we are being
2090 * dirtied again.
2091 */
2092 static void
2093 dbuf_redirty(dbuf_dirty_record_t *dr)
2094 {
2095 dmu_buf_impl_t *db = dr->dr_dbuf;
2096
2097 ASSERT(MUTEX_HELD(&db->db_mtx));
2098
2099 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2100 /*
2101 * If this buffer has already been written out,
2102 * we now need to reset its state.
2103 */
2104 dbuf_unoverride(dr);
2105 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2106 db->db_state != DB_NOFILL) {
2107 /* Already released on initial dirty, so just thaw.
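 * (The freeze/thaw pair is a ZFS_DEBUG_MODIFY debugging aid:
 * arc_buf_freeze() checksums a buffer once it should no longer
 * change, and thawing re-opens it for the modification that this
 * redirty implies.)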
*/ 2108 ASSERT(arc_released(db->db_buf)); 2109 arc_buf_thaw(db->db_buf); 2110 } 2111 } 2112 } 2113 2114 dbuf_dirty_record_t * 2115 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) 2116 { 2117 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2118 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid); 2119 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE); 2120 ASSERT(dn->dn_maxblkid >= blkid); 2121 2122 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP); 2123 list_link_init(&dr->dr_dirty_node); 2124 list_link_init(&dr->dr_dbuf_node); 2125 dr->dr_dnode = dn; 2126 dr->dr_txg = tx->tx_txg; 2127 dr->dt.dll.dr_blkid = blkid; 2128 dr->dr_accounted = dn->dn_datablksz; 2129 2130 /* 2131 * There should not be any dbuf for the block that we're dirtying. 2132 * Otherwise the buffer contents could be inconsistent between the 2133 * dbuf and the lightweight dirty record. 2134 */ 2135 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid, 2136 NULL)); 2137 2138 mutex_enter(&dn->dn_mtx); 2139 int txgoff = tx->tx_txg & TXG_MASK; 2140 if (dn->dn_free_ranges[txgoff] != NULL) { 2141 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1); 2142 } 2143 2144 if (dn->dn_nlevels == 1) { 2145 ASSERT3U(blkid, <, dn->dn_nblkptr); 2146 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2147 mutex_exit(&dn->dn_mtx); 2148 rw_exit(&dn->dn_struct_rwlock); 2149 dnode_setdirty(dn, tx); 2150 } else { 2151 mutex_exit(&dn->dn_mtx); 2152 2153 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2154 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn, 2155 1, blkid >> epbs, FTAG); 2156 rw_exit(&dn->dn_struct_rwlock); 2157 if (parent_db == NULL) { 2158 kmem_free(dr, sizeof (*dr)); 2159 return (NULL); 2160 } 2161 int err = dbuf_read(parent_db, NULL, 2162 (DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2163 if (err != 0) { 2164 dbuf_rele(parent_db, FTAG); 2165 kmem_free(dr, sizeof (*dr)); 2166 return (NULL); 2167 } 2168 2169 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx); 2170 dbuf_rele(parent_db, FTAG); 2171 mutex_enter(&parent_dr->dt.di.dr_mtx); 2172 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg); 2173 list_insert_tail(&parent_dr->dt.di.dr_children, dr); 2174 mutex_exit(&parent_dr->dt.di.dr_mtx); 2175 dr->dr_parent = parent_dr; 2176 } 2177 2178 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx); 2179 2180 return (dr); 2181 } 2182 2183 dbuf_dirty_record_t * 2184 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2185 { 2186 dnode_t *dn; 2187 objset_t *os; 2188 dbuf_dirty_record_t *dr, *dr_next, *dr_head; 2189 int txgoff = tx->tx_txg & TXG_MASK; 2190 boolean_t drop_struct_rwlock = B_FALSE; 2191 2192 ASSERT(tx->tx_txg != 0); 2193 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2194 DMU_TX_DIRTY_BUF(tx, db); 2195 2196 DB_DNODE_ENTER(db); 2197 dn = DB_DNODE(db); 2198 /* 2199 * Shouldn't dirty a regular buffer in syncing context. Private 2200 * objects may be dirtied in syncing context, but only if they 2201 * were already pre-dirtied in open context. 
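 *
 * For example, space maps live in the MOS (os_dsl_dataset == NULL,
 * exempted by the assertion below) and are legitimately dirtied from
 * syncing context, while a regular file buffer tripping this assertion
 * during its txg's sync would indicate a DMU bug.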
2202 */ 2203 #ifdef ZFS_DEBUG 2204 if (dn->dn_objset->os_dsl_dataset != NULL) { 2205 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 2206 RW_READER, FTAG); 2207 } 2208 ASSERT(!dmu_tx_is_syncing(tx) || 2209 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 2210 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2211 dn->dn_objset->os_dsl_dataset == NULL); 2212 if (dn->dn_objset->os_dsl_dataset != NULL) 2213 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 2214 #endif 2215 /* 2216 * We make this assert for private objects as well, but after we 2217 * check if we're already dirty. They are allowed to re-dirty 2218 * in syncing context. 2219 */ 2220 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2221 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2222 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2223 2224 mutex_enter(&db->db_mtx); 2225 /* 2226 * XXX make this true for indirects too? The problem is that 2227 * transactions created with dmu_tx_create_assigned() from 2228 * syncing context don't bother holding ahead. 2229 */ 2230 ASSERT(db->db_level != 0 || 2231 db->db_state == DB_CACHED || db->db_state == DB_FILL || 2232 db->db_state == DB_NOFILL); 2233 2234 mutex_enter(&dn->dn_mtx); 2235 dnode_set_dirtyctx(dn, tx, db); 2236 if (tx->tx_txg > dn->dn_dirty_txg) 2237 dn->dn_dirty_txg = tx->tx_txg; 2238 mutex_exit(&dn->dn_mtx); 2239 2240 if (db->db_blkid == DMU_SPILL_BLKID) 2241 dn->dn_have_spill = B_TRUE; 2242 2243 /* 2244 * If this buffer is already dirty, we're done. 2245 */ 2246 dr_head = list_head(&db->db_dirty_records); 2247 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg || 2248 db->db.db_object == DMU_META_DNODE_OBJECT); 2249 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg); 2250 if (dr_next && dr_next->dr_txg == tx->tx_txg) { 2251 DB_DNODE_EXIT(db); 2252 2253 dbuf_redirty(dr_next); 2254 mutex_exit(&db->db_mtx); 2255 return (dr_next); 2256 } 2257 2258 /* 2259 * Only valid if not already dirty. 2260 */ 2261 ASSERT(dn->dn_object == 0 || 2262 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2263 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2264 2265 ASSERT3U(dn->dn_nlevels, >, db->db_level); 2266 2267 /* 2268 * We should only be dirtying in syncing context if it's the 2269 * mos or we're initializing the os or it's a special object. 2270 * However, we are allowed to dirty in syncing context provided 2271 * we already dirtied it in open context. Hence we must make 2272 * this assertion only if we're not already dirty. 2273 */ 2274 os = dn->dn_objset; 2275 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 2276 #ifdef ZFS_DEBUG 2277 if (dn->dn_objset->os_dsl_dataset != NULL) 2278 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 2279 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2280 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 2281 if (dn->dn_objset->os_dsl_dataset != NULL) 2282 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 2283 #endif 2284 ASSERT(db->db.db_size != 0); 2285 2286 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2287 2288 if (db->db_blkid != DMU_BONUS_BLKID) { 2289 dmu_objset_willuse_space(os, db->db.db_size, tx); 2290 } 2291 2292 /* 2293 * If this buffer is dirty in an old transaction group we need 2294 * to make a copy of it so that the changes we make in this 2295 * transaction group won't leak out when we sync the older txg. 
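 *
 * Worked example (illustrative txg numbers): this buffer was dirtied
 * in txg 10, txg 10 is still syncing, and we are now dirtying it in
 * txg 12. Without the copy made by dbuf_fix_old_data() below, the
 * txg-12 changes could be written out by the still-running txg-10
 * sync, breaking transactional consistency.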
2296 */ 2297 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 2298 list_link_init(&dr->dr_dirty_node); 2299 list_link_init(&dr->dr_dbuf_node); 2300 dr->dr_dnode = dn; 2301 if (db->db_level == 0) { 2302 void *data_old = db->db_buf; 2303 2304 if (db->db_state != DB_NOFILL) { 2305 if (db->db_blkid == DMU_BONUS_BLKID) { 2306 dbuf_fix_old_data(db, tx->tx_txg); 2307 data_old = db->db.db_data; 2308 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 2309 /* 2310 * Release the data buffer from the cache so 2311 * that we can modify it without impacting 2312 * possible other users of this cached data 2313 * block. Note that indirect blocks and 2314 * private objects are not released until the 2315 * syncing state (since they are only modified 2316 * then). 2317 */ 2318 arc_release(db->db_buf, db); 2319 dbuf_fix_old_data(db, tx->tx_txg); 2320 data_old = db->db_buf; 2321 } 2322 ASSERT(data_old != NULL); 2323 } 2324 dr->dt.dl.dr_data = data_old; 2325 } else { 2326 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); 2327 list_create(&dr->dt.di.dr_children, 2328 sizeof (dbuf_dirty_record_t), 2329 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 2330 } 2331 if (db->db_blkid != DMU_BONUS_BLKID) 2332 dr->dr_accounted = db->db.db_size; 2333 dr->dr_dbuf = db; 2334 dr->dr_txg = tx->tx_txg; 2335 list_insert_before(&db->db_dirty_records, dr_next, dr); 2336 2337 /* 2338 * We could have been freed_in_flight between the dbuf_noread 2339 * and dbuf_dirty. We win, as though the dbuf_noread() had 2340 * happened after the free. 2341 */ 2342 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2343 db->db_blkid != DMU_SPILL_BLKID) { 2344 mutex_enter(&dn->dn_mtx); 2345 if (dn->dn_free_ranges[txgoff] != NULL) { 2346 range_tree_clear(dn->dn_free_ranges[txgoff], 2347 db->db_blkid, 1); 2348 } 2349 mutex_exit(&dn->dn_mtx); 2350 db->db_freed_in_flight = FALSE; 2351 } 2352 2353 /* 2354 * This buffer is now part of this txg 2355 */ 2356 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 2357 db->db_dirtycnt += 1; 2358 ASSERT3U(db->db_dirtycnt, <=, 3); 2359 2360 mutex_exit(&db->db_mtx); 2361 2362 if (db->db_blkid == DMU_BONUS_BLKID || 2363 db->db_blkid == DMU_SPILL_BLKID) { 2364 mutex_enter(&dn->dn_mtx); 2365 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2366 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2367 mutex_exit(&dn->dn_mtx); 2368 dnode_setdirty(dn, tx); 2369 DB_DNODE_EXIT(db); 2370 return (dr); 2371 } 2372 2373 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 2374 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2375 drop_struct_rwlock = B_TRUE; 2376 } 2377 2378 /* 2379 * If we are overwriting a dedup BP, then unless it is snapshotted, 2380 * when we get to syncing context we will need to decrement its 2381 * refcount in the DDT. Prefetch the relevant DDT block so that 2382 * syncing context won't have to wait for the i/o. 2383 */ 2384 if (db->db_blkptr != NULL) { 2385 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 2386 ddt_prefetch(os->os_spa, db->db_blkptr); 2387 dmu_buf_unlock_parent(db, dblt, FTAG); 2388 } 2389 2390 /* 2391 * We need to hold the dn_struct_rwlock to make this assertion, 2392 * because it protects dn_phys / dn_next_nlevels from changing. 
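 *
 * For example, an earlier dirty in this txg may have grown the
 * indirection tree via dnode_new_blkid(); until the dnode syncs, the
 * new level is visible only in dn_next_nlevels[], which is why the
 * assertion below also accepts the slots for recent txgs.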
2393 */ 2394 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 2395 dn->dn_phys->dn_nlevels > db->db_level || 2396 dn->dn_next_nlevels[txgoff] > db->db_level || 2397 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 2398 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 2399 2400 2401 if (db->db_level == 0) { 2402 ASSERT(!db->db_objset->os_raw_receive || 2403 dn->dn_maxblkid >= db->db_blkid); 2404 dnode_new_blkid(dn, db->db_blkid, tx, 2405 drop_struct_rwlock, B_FALSE); 2406 ASSERT(dn->dn_maxblkid >= db->db_blkid); 2407 } 2408 2409 if (db->db_level+1 < dn->dn_nlevels) { 2410 dmu_buf_impl_t *parent = db->db_parent; 2411 dbuf_dirty_record_t *di; 2412 int parent_held = FALSE; 2413 2414 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 2415 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2416 parent = dbuf_hold_level(dn, db->db_level + 1, 2417 db->db_blkid >> epbs, FTAG); 2418 ASSERT(parent != NULL); 2419 parent_held = TRUE; 2420 } 2421 if (drop_struct_rwlock) 2422 rw_exit(&dn->dn_struct_rwlock); 2423 ASSERT3U(db->db_level + 1, ==, parent->db_level); 2424 di = dbuf_dirty(parent, tx); 2425 if (parent_held) 2426 dbuf_rele(parent, FTAG); 2427 2428 mutex_enter(&db->db_mtx); 2429 /* 2430 * Since we've dropped the mutex, it's possible that 2431 * dbuf_undirty() might have changed this out from under us. 2432 */ 2433 if (list_head(&db->db_dirty_records) == dr || 2434 dn->dn_object == DMU_META_DNODE_OBJECT) { 2435 mutex_enter(&di->dt.di.dr_mtx); 2436 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 2437 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2438 list_insert_tail(&di->dt.di.dr_children, dr); 2439 mutex_exit(&di->dt.di.dr_mtx); 2440 dr->dr_parent = di; 2441 } 2442 mutex_exit(&db->db_mtx); 2443 } else { 2444 ASSERT(db->db_level + 1 == dn->dn_nlevels); 2445 ASSERT(db->db_blkid < dn->dn_nblkptr); 2446 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 2447 mutex_enter(&dn->dn_mtx); 2448 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2449 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2450 mutex_exit(&dn->dn_mtx); 2451 if (drop_struct_rwlock) 2452 rw_exit(&dn->dn_struct_rwlock); 2453 } 2454 2455 dnode_setdirty(dn, tx); 2456 DB_DNODE_EXIT(db); 2457 return (dr); 2458 } 2459 2460 static void 2461 dbuf_undirty_bonus(dbuf_dirty_record_t *dr) 2462 { 2463 dmu_buf_impl_t *db = dr->dr_dbuf; 2464 2465 if (dr->dt.dl.dr_data != db->db.db_data) { 2466 struct dnode *dn = dr->dr_dnode; 2467 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 2468 2469 kmem_free(dr->dt.dl.dr_data, max_bonuslen); 2470 arc_space_return(max_bonuslen, ARC_SPACE_BONUS); 2471 } 2472 db->db_data_pending = NULL; 2473 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 2474 list_remove(&db->db_dirty_records, dr); 2475 if (dr->dr_dbuf->db_level != 0) { 2476 mutex_destroy(&dr->dt.di.dr_mtx); 2477 list_destroy(&dr->dt.di.dr_children); 2478 } 2479 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2480 ASSERT3U(db->db_dirtycnt, >, 0); 2481 db->db_dirtycnt -= 1; 2482 } 2483 2484 /* 2485 * Undirty a buffer in the transaction group referenced by the given 2486 * transaction. Return whether this evicted the dbuf. 2487 */ 2488 static boolean_t 2489 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2490 { 2491 uint64_t txg = tx->tx_txg; 2492 2493 ASSERT(txg != 0); 2494 2495 /* 2496 * Due to our use of dn_nlevels below, this can only be called 2497 * in open context, unless we are operating on the MOS. 
2498 * From syncing context, dn_nlevels may be different from the 2499 * dn_nlevels used when dbuf was dirtied. 2500 */ 2501 ASSERT(db->db_objset == 2502 dmu_objset_pool(db->db_objset)->dp_meta_objset || 2503 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 2504 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2505 ASSERT0(db->db_level); 2506 ASSERT(MUTEX_HELD(&db->db_mtx)); 2507 2508 /* 2509 * If this buffer is not dirty, we're done. 2510 */ 2511 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg); 2512 if (dr == NULL) 2513 return (B_FALSE); 2514 ASSERT(dr->dr_dbuf == db); 2515 2516 dnode_t *dn = dr->dr_dnode; 2517 2518 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2519 2520 ASSERT(db->db.db_size != 0); 2521 2522 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 2523 dr->dr_accounted, txg); 2524 2525 list_remove(&db->db_dirty_records, dr); 2526 2527 /* 2528 * Note that there are three places in dbuf_dirty() 2529 * where this dirty record may be put on a list. 2530 * Make sure to do a list_remove corresponding to 2531 * every one of those list_insert calls. 2532 */ 2533 if (dr->dr_parent) { 2534 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 2535 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 2536 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 2537 } else if (db->db_blkid == DMU_SPILL_BLKID || 2538 db->db_level + 1 == dn->dn_nlevels) { 2539 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 2540 mutex_enter(&dn->dn_mtx); 2541 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 2542 mutex_exit(&dn->dn_mtx); 2543 } 2544 2545 if (db->db_state != DB_NOFILL) { 2546 dbuf_unoverride(dr); 2547 2548 ASSERT(db->db_buf != NULL); 2549 ASSERT(dr->dt.dl.dr_data != NULL); 2550 if (dr->dt.dl.dr_data != db->db_buf) 2551 arc_buf_destroy(dr->dt.dl.dr_data, db); 2552 } 2553 2554 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2555 2556 ASSERT(db->db_dirtycnt > 0); 2557 db->db_dirtycnt -= 1; 2558 2559 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2560 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 2561 dbuf_destroy(db); 2562 return (B_TRUE); 2563 } 2564 2565 return (B_FALSE); 2566 } 2567 2568 static void 2569 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) 2570 { 2571 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2572 2573 ASSERT(tx->tx_txg != 0); 2574 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2575 2576 /* 2577 * Quick check for dirtiness. For already dirty blocks, this 2578 * reduces runtime of this function by >90%, and overall performance 2579 * by 50% for some workloads (e.g. file deletion with indirect blocks 2580 * cached). 2581 */ 2582 mutex_enter(&db->db_mtx); 2583 2584 if (db->db_state == DB_CACHED) { 2585 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2586 /* 2587 * It's possible that it is already dirty but not cached, 2588 * because there are some calls to dbuf_dirty() that don't 2589 * go through dmu_buf_will_dirty(). 2590 */ 2591 if (dr != NULL) { 2592 /* This dbuf is already dirty and cached. 
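 * Returning here skips the dbuf_read() and dbuf_dirty() calls below,
 * which is where the large runtime win described above comes from.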
*/ 2593 dbuf_redirty(dr); 2594 mutex_exit(&db->db_mtx); 2595 return; 2596 } 2597 } 2598 mutex_exit(&db->db_mtx); 2599 2600 DB_DNODE_ENTER(db); 2601 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 2602 flags |= DB_RF_HAVESTRUCT; 2603 DB_DNODE_EXIT(db); 2604 (void) dbuf_read(db, NULL, flags); 2605 (void) dbuf_dirty(db, tx); 2606 } 2607 2608 void 2609 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2610 { 2611 dmu_buf_will_dirty_impl(db_fake, 2612 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); 2613 } 2614 2615 boolean_t 2616 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2617 { 2618 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2619 dbuf_dirty_record_t *dr; 2620 2621 mutex_enter(&db->db_mtx); 2622 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2623 mutex_exit(&db->db_mtx); 2624 return (dr != NULL); 2625 } 2626 2627 void 2628 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2629 { 2630 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2631 2632 db->db_state = DB_NOFILL; 2633 DTRACE_SET_STATE(db, "allocating NOFILL buffer"); 2634 dmu_buf_will_fill(db_fake, tx); 2635 } 2636 2637 void 2638 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2639 { 2640 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2641 2642 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2643 ASSERT(tx->tx_txg != 0); 2644 ASSERT(db->db_level == 0); 2645 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2646 2647 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2648 dmu_tx_private_ok(tx)); 2649 2650 dbuf_noread(db); 2651 (void) dbuf_dirty(db, tx); 2652 } 2653 2654 /* 2655 * This function is effectively the same as dmu_buf_will_dirty(), but 2656 * indicates the caller expects raw encrypted data in the db, and provides 2657 * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2658 * blkptr_t when this dbuf is written. This is only used for blocks of 2659 * dnodes, during raw receive. 2660 */ 2661 void 2662 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2663 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2664 { 2665 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2666 dbuf_dirty_record_t *dr; 2667 2668 /* 2669 * dr_has_raw_params is only processed for blocks of dnodes 2670 * (see dbuf_sync_dnode_leaf_crypt()). 
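 *
 * For example, a raw receive carries the salt/IV/MAC for each dnode
 * block in the send stream rather than generating them locally, and
 * stores them here so the eventual write of this block reuses them
 * verbatim in its block pointer.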
2671 */ 2672 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2673 ASSERT3U(db->db_level, ==, 0); 2674 ASSERT(db->db_objset->os_raw_receive); 2675 2676 dmu_buf_will_dirty_impl(db_fake, 2677 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); 2678 2679 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2680 2681 ASSERT3P(dr, !=, NULL); 2682 2683 dr->dt.dl.dr_has_raw_params = B_TRUE; 2684 dr->dt.dl.dr_byteorder = byteorder; 2685 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN); 2686 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN); 2687 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN); 2688 } 2689 2690 static void 2691 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) 2692 { 2693 struct dirty_leaf *dl; 2694 dbuf_dirty_record_t *dr; 2695 2696 dr = list_head(&db->db_dirty_records); 2697 ASSERT3P(dr, !=, NULL); 2698 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2699 dl = &dr->dt.dl; 2700 dl->dr_overridden_by = *bp; 2701 dl->dr_override_state = DR_OVERRIDDEN; 2702 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2703 } 2704 2705 void 2706 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx) 2707 { 2708 (void) tx; 2709 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2710 dbuf_states_t old_state; 2711 mutex_enter(&db->db_mtx); 2712 DBUF_VERIFY(db); 2713 2714 old_state = db->db_state; 2715 db->db_state = DB_CACHED; 2716 if (old_state == DB_FILL) { 2717 if (db->db_level == 0 && db->db_freed_in_flight) { 2718 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2719 /* we were freed while filling */ 2720 /* XXX dbuf_undirty? */ 2721 memset(db->db.db_data, 0, db->db.db_size); 2722 db->db_freed_in_flight = FALSE; 2723 DTRACE_SET_STATE(db, 2724 "fill done handling freed in flight"); 2725 } else { 2726 DTRACE_SET_STATE(db, "fill done"); 2727 } 2728 cv_broadcast(&db->db_changed); 2729 } 2730 mutex_exit(&db->db_mtx); 2731 } 2732 2733 void 2734 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 2735 bp_embedded_type_t etype, enum zio_compress comp, 2736 int uncompressed_size, int compressed_size, int byteorder, 2737 dmu_tx_t *tx) 2738 { 2739 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2740 struct dirty_leaf *dl; 2741 dmu_object_type_t type; 2742 dbuf_dirty_record_t *dr; 2743 2744 if (etype == BP_EMBEDDED_TYPE_DATA) { 2745 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2746 SPA_FEATURE_EMBEDDED_DATA)); 2747 } 2748 2749 DB_DNODE_ENTER(db); 2750 type = DB_DNODE(db)->dn_type; 2751 DB_DNODE_EXIT(db); 2752 2753 ASSERT0(db->db_level); 2754 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2755 2756 dmu_buf_will_not_fill(dbuf, tx); 2757 2758 dr = list_head(&db->db_dirty_records); 2759 ASSERT3P(dr, !=, NULL); 2760 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2761 dl = &dr->dt.dl; 2762 encode_embedded_bp_compressed(&dl->dr_overridden_by, 2763 data, comp, uncompressed_size, compressed_size); 2764 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 2765 BP_SET_TYPE(&dl->dr_overridden_by, type); 2766 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 2767 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 2768 2769 dl->dr_override_state = DR_OVERRIDDEN; 2770 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2771 } 2772 2773 void 2774 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx) 2775 { 2776 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2777 dmu_object_type_t type; 2778 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset, 2779 SPA_FEATURE_REDACTED_DATASETS)); 2780 2781 DB_DNODE_ENTER(db); 2782 type = DB_DNODE(db)->dn_type; 2783 DB_DNODE_EXIT(db); 2784 2785 ASSERT0(db->db_level); 2786 dmu_buf_will_not_fill(dbuf, tx); 2787 2788 
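/*
 * Build the override BP by hand: a redacted, embedded-style BP that
 * records only the object type, level, birth txg, and logical size.
 * It has no DVAs, so no data for this block is ever written to disk.
 */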
blkptr_t bp = { { { {0} } } }; 2789 BP_SET_TYPE(&bp, type); 2790 BP_SET_LEVEL(&bp, 0); 2791 BP_SET_BIRTH(&bp, tx->tx_txg, 0); 2792 BP_SET_REDACTED(&bp); 2793 BPE_SET_LSIZE(&bp, dbuf->db_size); 2794 2795 dbuf_override_impl(db, &bp, tx); 2796 } 2797 2798 /* 2799 * Directly assign a provided arc buf to a given dbuf if it's not referenced 2800 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 2801 */ 2802 void 2803 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 2804 { 2805 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2806 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2807 ASSERT(db->db_level == 0); 2808 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 2809 ASSERT(buf != NULL); 2810 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 2811 ASSERT(tx->tx_txg != 0); 2812 2813 arc_return_buf(buf, db); 2814 ASSERT(arc_released(buf)); 2815 2816 mutex_enter(&db->db_mtx); 2817 2818 while (db->db_state == DB_READ || db->db_state == DB_FILL) 2819 cv_wait(&db->db_changed, &db->db_mtx); 2820 2821 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 2822 2823 if (db->db_state == DB_CACHED && 2824 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2825 /* 2826 * In practice, we will never have a case where we have an 2827 * encrypted arc buffer while additional holds exist on the 2828 * dbuf. We don't handle this here so we simply assert that 2829 * fact instead. 2830 */ 2831 ASSERT(!arc_is_encrypted(buf)); 2832 mutex_exit(&db->db_mtx); 2833 (void) dbuf_dirty(db, tx); 2834 memcpy(db->db.db_data, buf->b_data, db->db.db_size); 2835 arc_buf_destroy(buf, db); 2836 return; 2837 } 2838 2839 if (db->db_state == DB_CACHED) { 2840 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); 2841 2842 ASSERT(db->db_buf != NULL); 2843 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 2844 ASSERT(dr->dt.dl.dr_data == db->db_buf); 2845 2846 if (!arc_released(db->db_buf)) { 2847 ASSERT(dr->dt.dl.dr_override_state == 2848 DR_OVERRIDDEN); 2849 arc_release(db->db_buf, db); 2850 } 2851 dr->dt.dl.dr_data = buf; 2852 arc_buf_destroy(db->db_buf, db); 2853 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2854 arc_release(db->db_buf, db); 2855 arc_buf_destroy(db->db_buf, db); 2856 } 2857 db->db_buf = NULL; 2858 } 2859 ASSERT(db->db_buf == NULL); 2860 dbuf_set_data(db, buf); 2861 db->db_state = DB_FILL; 2862 DTRACE_SET_STATE(db, "filling assigned arcbuf"); 2863 mutex_exit(&db->db_mtx); 2864 (void) dbuf_dirty(db, tx); 2865 dmu_buf_fill_done(&db->db, tx); 2866 } 2867 2868 void 2869 dbuf_destroy(dmu_buf_impl_t *db) 2870 { 2871 dnode_t *dn; 2872 dmu_buf_impl_t *parent = db->db_parent; 2873 dmu_buf_impl_t *dndb; 2874 2875 ASSERT(MUTEX_HELD(&db->db_mtx)); 2876 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2877 2878 if (db->db_buf != NULL) { 2879 arc_buf_destroy(db->db_buf, db); 2880 db->db_buf = NULL; 2881 } 2882 2883 if (db->db_blkid == DMU_BONUS_BLKID) { 2884 int slots = DB_DNODE(db)->dn_num_slots; 2885 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 2886 if (db->db.db_data != NULL) { 2887 kmem_free(db->db.db_data, bonuslen); 2888 arc_space_return(bonuslen, ARC_SPACE_BONUS); 2889 db->db_state = DB_UNCACHED; 2890 DTRACE_SET_STATE(db, "buffer cleared"); 2891 } 2892 } 2893 2894 dbuf_clear_data(db); 2895 2896 if (multilist_link_active(&db->db_cache_link)) { 2897 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2898 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2899 2900 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 2901 (void) 
zfs_refcount_remove_many( 2902 &dbuf_caches[db->db_caching_status].size, 2903 db->db.db_size, db); 2904 2905 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 2906 DBUF_STAT_BUMPDOWN(metadata_cache_count); 2907 } else { 2908 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 2909 DBUF_STAT_BUMPDOWN(cache_count); 2910 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 2911 db->db.db_size); 2912 } 2913 db->db_caching_status = DB_NO_CACHE; 2914 } 2915 2916 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2917 ASSERT(db->db_data_pending == NULL); 2918 ASSERT(list_is_empty(&db->db_dirty_records)); 2919 2920 db->db_state = DB_EVICTING; 2921 DTRACE_SET_STATE(db, "buffer eviction started"); 2922 db->db_blkptr = NULL; 2923 2924 /* 2925 * Now that db_state is DB_EVICTING, nobody else can find this via 2926 * the hash table. We can now drop db_mtx, which allows us to 2927 * acquire the dn_dbufs_mtx. 2928 */ 2929 mutex_exit(&db->db_mtx); 2930 2931 DB_DNODE_ENTER(db); 2932 dn = DB_DNODE(db); 2933 dndb = dn->dn_dbuf; 2934 if (db->db_blkid != DMU_BONUS_BLKID) { 2935 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2936 if (needlock) 2937 mutex_enter_nested(&dn->dn_dbufs_mtx, 2938 NESTED_SINGLE); 2939 avl_remove(&dn->dn_dbufs, db); 2940 membar_producer(); 2941 DB_DNODE_EXIT(db); 2942 if (needlock) 2943 mutex_exit(&dn->dn_dbufs_mtx); 2944 /* 2945 * Decrementing the dbuf count means that the hold corresponding 2946 * to the removed dbuf is no longer discounted in dnode_move(), 2947 * so the dnode cannot be moved until after we release the hold. 2948 * The membar_producer() ensures visibility of the decremented 2949 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2950 * release any lock. 2951 */ 2952 mutex_enter(&dn->dn_mtx); 2953 dnode_rele_and_unlock(dn, db, B_TRUE); 2954 db->db_dnode_handle = NULL; 2955 2956 dbuf_hash_remove(db); 2957 } else { 2958 DB_DNODE_EXIT(db); 2959 } 2960 2961 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2962 2963 db->db_parent = NULL; 2964 2965 ASSERT(db->db_buf == NULL); 2966 ASSERT(db->db.db_data == NULL); 2967 ASSERT(db->db_hash_next == NULL); 2968 ASSERT(db->db_blkptr == NULL); 2969 ASSERT(db->db_data_pending == NULL); 2970 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2971 ASSERT(!multilist_link_active(&db->db_cache_link)); 2972 2973 /* 2974 * If this dbuf is referenced from an indirect dbuf, 2975 * decrement the ref count on the indirect dbuf. 2976 */ 2977 if (parent && parent != dndb) { 2978 mutex_enter(&parent->db_mtx); 2979 dbuf_rele_and_unlock(parent, db, B_TRUE); 2980 } 2981 2982 kmem_cache_free(dbuf_kmem_cache, db); 2983 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 2984 } 2985 2986 /* 2987 * Note: While bpp will always be updated if the function returns success, 2988 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2989 * this happens when the dnode is the meta-dnode, or {user|group|project}used 2990 * object. 
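 *
 * For example, when the requested block is referenced directly from
 * the meta-dnode, *parentp is left NULL and *bpp points into dn_phys,
 * so callers must not assume they received a parent hold to release.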
2991 */ 2992 __attribute__((always_inline)) 2993 static inline int 2994 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2995 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2996 { 2997 *parentp = NULL; 2998 *bpp = NULL; 2999 3000 ASSERT(blkid != DMU_BONUS_BLKID); 3001 3002 if (blkid == DMU_SPILL_BLKID) { 3003 mutex_enter(&dn->dn_mtx); 3004 if (dn->dn_have_spill && 3005 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 3006 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 3007 else 3008 *bpp = NULL; 3009 dbuf_add_ref(dn->dn_dbuf, NULL); 3010 *parentp = dn->dn_dbuf; 3011 mutex_exit(&dn->dn_mtx); 3012 return (0); 3013 } 3014 3015 int nlevels = 3016 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 3017 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 3018 3019 ASSERT3U(level * epbs, <, 64); 3020 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3021 /* 3022 * This assertion shouldn't trip as long as the max indirect block size 3023 * is less than 1M. The reason for this is that up to that point, 3024 * the number of levels required to address an entire object with blocks 3025 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 3026 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 3027 * (i.e. we can address the entire object), objects will all use at most 3028 * N-1 levels and the assertion won't overflow. However, once epbs is 3029 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 3030 * enough to address an entire object, so objects will have 5 levels, 3031 * but then this assertion will overflow. 3032 * 3033 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 3034 * need to redo this logic to handle overflows. 3035 */ 3036 ASSERT(level >= nlevels || 3037 ((nlevels - level - 1) * epbs) + 3038 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 3039 if (level >= nlevels || 3040 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 3041 ((nlevels - level - 1) * epbs)) || 3042 (fail_sparse && 3043 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 3044 /* the buffer has no parent yet */ 3045 return (SET_ERROR(ENOENT)); 3046 } else if (level < nlevels-1) { 3047 /* this block is referenced from an indirect block */ 3048 int err; 3049 3050 err = dbuf_hold_impl(dn, level + 1, 3051 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 3052 3053 if (err) 3054 return (err); 3055 err = dbuf_read(*parentp, NULL, 3056 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 3057 if (err) { 3058 dbuf_rele(*parentp, NULL); 3059 *parentp = NULL; 3060 return (err); 3061 } 3062 rw_enter(&(*parentp)->db_rwlock, RW_READER); 3063 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 3064 (blkid & ((1ULL << epbs) - 1)); 3065 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 3066 ASSERT(BP_IS_HOLE(*bpp)); 3067 rw_exit(&(*parentp)->db_rwlock); 3068 return (0); 3069 } else { 3070 /* the block is referenced from the dnode */ 3071 ASSERT3U(level, ==, nlevels-1); 3072 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 3073 blkid < dn->dn_phys->dn_nblkptr); 3074 if (dn->dn_dbuf) { 3075 dbuf_add_ref(dn->dn_dbuf, NULL); 3076 *parentp = dn->dn_dbuf; 3077 } 3078 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 3079 return (0); 3080 } 3081 } 3082 3083 static dmu_buf_impl_t * 3084 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 3085 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash) 3086 { 3087 objset_t *os = dn->dn_objset; 3088 dmu_buf_impl_t *db, *odb; 3089 3090 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3091 ASSERT(dn->dn_type != DMU_OT_NONE); 3092 3093 db = 
kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 3094 3095 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t), 3096 offsetof(dbuf_dirty_record_t, dr_dbuf_node)); 3097 3098 db->db_objset = os; 3099 db->db.db_object = dn->dn_object; 3100 db->db_level = level; 3101 db->db_blkid = blkid; 3102 db->db_dirtycnt = 0; 3103 db->db_dnode_handle = dn->dn_handle; 3104 db->db_parent = parent; 3105 db->db_blkptr = blkptr; 3106 db->db_hash = hash; 3107 3108 db->db_user = NULL; 3109 db->db_user_immediate_evict = FALSE; 3110 db->db_freed_in_flight = FALSE; 3111 db->db_pending_evict = FALSE; 3112 3113 if (blkid == DMU_BONUS_BLKID) { 3114 ASSERT3P(parent, ==, dn->dn_dbuf); 3115 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 3116 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 3117 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 3118 db->db.db_offset = DMU_BONUS_BLKID; 3119 db->db_state = DB_UNCACHED; 3120 DTRACE_SET_STATE(db, "bonus buffer created"); 3121 db->db_caching_status = DB_NO_CACHE; 3122 /* the bonus dbuf is not placed in the hash table */ 3123 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3124 return (db); 3125 } else if (blkid == DMU_SPILL_BLKID) { 3126 db->db.db_size = (blkptr != NULL) ? 3127 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 3128 db->db.db_offset = 0; 3129 } else { 3130 int blocksize = 3131 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 3132 db->db.db_size = blocksize; 3133 db->db.db_offset = db->db_blkid * blocksize; 3134 } 3135 3136 /* 3137 * Hold the dn_dbufs_mtx while we get the new dbuf 3138 * in the hash table *and* added to the dbufs list. 3139 * This prevents a possible deadlock with someone 3140 * trying to look up this dbuf before it's added to the 3141 * dn_dbufs list. 3142 */ 3143 mutex_enter(&dn->dn_dbufs_mtx); 3144 db->db_state = DB_EVICTING; /* not worth logging this state change */ 3145 if ((odb = dbuf_hash_insert(db)) != NULL) { 3146 /* someone else inserted it first */ 3147 mutex_exit(&dn->dn_dbufs_mtx); 3148 kmem_cache_free(dbuf_kmem_cache, db); 3149 DBUF_STAT_BUMP(hash_insert_race); 3150 return (odb); 3151 } 3152 avl_add(&dn->dn_dbufs, db); 3153 3154 db->db_state = DB_UNCACHED; 3155 DTRACE_SET_STATE(db, "regular buffer created"); 3156 db->db_caching_status = DB_NO_CACHE; 3157 mutex_exit(&dn->dn_dbufs_mtx); 3158 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3159 3160 if (parent && parent != dn->dn_dbuf) 3161 dbuf_add_ref(parent, db); 3162 3163 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 3164 zfs_refcount_count(&dn->dn_holds) > 0); 3165 (void) zfs_refcount_add(&dn->dn_holds, db); 3166 3167 dprintf_dbuf(db, "db=%p\n", db); 3168 3169 return (db); 3170 } 3171 3172 /* 3173 * This function returns a block pointer and information about the object, 3174 * given a dnode and a block. This is a publicly accessible version of 3175 * dbuf_findbp that only returns some information, rather than the 3176 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock 3177 * should be locked as (at least) a reader. 
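 *
 * A minimal illustrative caller (locals assumed, error handling
 * elided):
 *
 *	blkptr_t bp;
 *	uint16_t dbss;
 *	uint8_t ibs;
 *	int err;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	err = dbuf_dnode_findbp(dn, 0, blkid, &bp, &dbss, &ibs);
 *	rw_exit(&dn->dn_struct_rwlock);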
3178 */ 3179 int 3180 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid, 3181 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift) 3182 { 3183 dmu_buf_impl_t *dbp = NULL; 3184 blkptr_t *bp2; 3185 int err = 0; 3186 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3187 3188 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2); 3189 if (err == 0) { 3190 *bp = *bp2; 3191 if (dbp != NULL) 3192 dbuf_rele(dbp, NULL); 3193 if (datablkszsec != NULL) 3194 *datablkszsec = dn->dn_phys->dn_datablkszsec; 3195 if (indblkshift != NULL) 3196 *indblkshift = dn->dn_phys->dn_indblkshift; 3197 } 3198 3199 return (err); 3200 } 3201 3202 typedef struct dbuf_prefetch_arg { 3203 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 3204 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 3205 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 3206 int dpa_curlevel; /* The current level that we're reading */ 3207 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 3208 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 3209 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 3210 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 3211 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */ 3212 void *dpa_arg; /* prefetch completion arg */ 3213 } dbuf_prefetch_arg_t; 3214 3215 static void 3216 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done) 3217 { 3218 if (dpa->dpa_cb != NULL) { 3219 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level, 3220 dpa->dpa_zb.zb_blkid, io_done); 3221 } 3222 kmem_free(dpa, sizeof (*dpa)); 3223 } 3224 3225 static void 3226 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, 3227 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3228 { 3229 (void) zio, (void) zb, (void) iobp; 3230 dbuf_prefetch_arg_t *dpa = private; 3231 3232 if (abuf != NULL) 3233 arc_buf_destroy(abuf, private); 3234 3235 dbuf_prefetch_fini(dpa, B_TRUE); 3236 } 3237 3238 /* 3239 * Actually issue the prefetch read for the block given. 3240 */ 3241 static void 3242 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 3243 { 3244 ASSERT(!BP_IS_REDACTED(bp) || 3245 dsl_dataset_feature_is_active( 3246 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3247 SPA_FEATURE_REDACTED_DATASETS)); 3248 3249 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 3250 return (dbuf_prefetch_fini(dpa, B_FALSE)); 3251 3252 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 3253 arc_flags_t aflags = 3254 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | 3255 ARC_FLAG_NO_BUF; 3256 3257 /* dnodes are always read as raw and then converted later */ 3258 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 3259 dpa->dpa_curlevel == 0) 3260 zio_flags |= ZIO_FLAG_RAW; 3261 3262 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3263 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 3264 ASSERT(dpa->dpa_zio != NULL); 3265 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, 3266 dbuf_issue_final_prefetch_done, dpa, 3267 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 3268 } 3269 3270 /* 3271 * Called when an indirect block above our prefetch target is read in. This 3272 * will either read in the next indirect block down the tree or issue the actual 3273 * prefetch if the next block down is our target. 
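 *
 * Worked example (illustrative numbers, epbs = 7, i.e. 128 blkptrs per
 * indirect): prefetching L0 blkid 16384 when only the L3 indirect is
 * cached first issues a read of L2 blkid 16384 >> 14 = 1; this callback
 * then reads L1 blkid 16384 >> 7 = 128, and on its next invocation
 * issues the target L0 prefetch via dbuf_issue_final_prefetch().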
3274 */ 3275 static void 3276 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 3277 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3278 { 3279 (void) zb, (void) iobp; 3280 dbuf_prefetch_arg_t *dpa = private; 3281 3282 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 3283 ASSERT3S(dpa->dpa_curlevel, >, 0); 3284 3285 if (abuf == NULL) { 3286 ASSERT(zio == NULL || zio->io_error != 0); 3287 dbuf_prefetch_fini(dpa, B_TRUE); 3288 return; 3289 } 3290 ASSERT(zio == NULL || zio->io_error == 0); 3291 3292 /* 3293 * The dpa_dnode is only valid if we are called with a NULL 3294 * zio. This indicates that the arc_read() returned without 3295 * first calling zio_read() to issue a physical read. Once 3296 * a physical read is made the dpa_dnode must be invalidated 3297 * as the locks guarding it may have been dropped. If the 3298 * dpa_dnode is still valid, then we want to add it to the dbuf 3299 * cache. To do so, we must hold the dbuf associated with the block 3300 * we just prefetched, read its contents so that we associate it 3301 * with an arc_buf_t, and then release it. 3302 */ 3303 if (zio != NULL) { 3304 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 3305 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 3306 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 3307 } else { 3308 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 3309 } 3310 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 3311 3312 dpa->dpa_dnode = NULL; 3313 } else if (dpa->dpa_dnode != NULL) { 3314 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 3315 (dpa->dpa_epbs * (dpa->dpa_curlevel - 3316 dpa->dpa_zb.zb_level)); 3317 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 3318 dpa->dpa_curlevel, curblkid, FTAG); 3319 if (db == NULL) { 3320 arc_buf_destroy(abuf, private); 3321 dbuf_prefetch_fini(dpa, B_TRUE); 3322 return; 3323 } 3324 (void) dbuf_read(db, NULL, 3325 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 3326 dbuf_rele(db, FTAG); 3327 } 3328 3329 dpa->dpa_curlevel--; 3330 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 3331 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 3332 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 3333 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 3334 3335 ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode && 3336 dsl_dataset_feature_is_active( 3337 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3338 SPA_FEATURE_REDACTED_DATASETS))); 3339 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) { 3340 arc_buf_destroy(abuf, private); 3341 dbuf_prefetch_fini(dpa, B_TRUE); 3342 return; 3343 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 3344 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 3345 dbuf_issue_final_prefetch(dpa, bp); 3346 } else { 3347 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3348 zbookmark_phys_t zb; 3349 3350 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3351 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 3352 iter_aflags |= ARC_FLAG_L2CACHE; 3353 3354 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3355 3356 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 3357 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 3358 3359 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3360 bp, dbuf_prefetch_indirect_done, dpa, 3361 ZIO_PRIORITY_SYNC_READ, 3362 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3363 &iter_aflags, &zb); 3364 } 3365 3366 arc_buf_destroy(abuf, private); 3367 } 3368 3369 /* 3370 * Issue prefetch reads for the given block on the given level. 
If the indirect 3371 * blocks above that block are not in memory, we will read them in 3372 * asynchronously. As a result, this call never blocks waiting for a read to 3373 * complete. Note that the prefetch might fail if the dataset is encrypted and 3374 * the encryption key is unmapped before the IO completes. 3375 */ 3376 int 3377 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid, 3378 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb, 3379 void *arg) 3380 { 3381 blkptr_t bp; 3382 int epbs, nlevels, curlevel; 3383 uint64_t curblkid; 3384 3385 ASSERT(blkid != DMU_BONUS_BLKID); 3386 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3387 3388 if (blkid > dn->dn_maxblkid) 3389 goto no_issue; 3390 3391 if (level == 0 && dnode_block_freed(dn, blkid)) 3392 goto no_issue; 3393 3394 /* 3395 * This dnode hasn't been written to disk yet, so there's nothing to 3396 * prefetch. 3397 */ 3398 nlevels = dn->dn_phys->dn_nlevels; 3399 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 3400 goto no_issue; 3401 3402 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3403 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 3404 goto no_issue; 3405 3406 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 3407 level, blkid, NULL); 3408 if (db != NULL) { 3409 mutex_exit(&db->db_mtx); 3410 /* 3411 * This dbuf already exists. It is either CACHED, or 3412 * (we assume) about to be read or filled. 3413 */ 3414 goto no_issue; 3415 } 3416 3417 /* 3418 * Find the closest ancestor (indirect block) of the target block 3419 * that is present in the cache. In this indirect block, we will 3420 * find the bp that is at curlevel, curblkid. 3421 */ 3422 curlevel = level; 3423 curblkid = blkid; 3424 while (curlevel < nlevels - 1) { 3425 int parent_level = curlevel + 1; 3426 uint64_t parent_blkid = curblkid >> epbs; 3427 dmu_buf_impl_t *db; 3428 3429 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 3430 FALSE, TRUE, FTAG, &db) == 0) { 3431 blkptr_t *bpp = db->db_buf->b_data; 3432 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 3433 dbuf_rele(db, FTAG); 3434 break; 3435 } 3436 3437 curlevel = parent_level; 3438 curblkid = parent_blkid; 3439 } 3440 3441 if (curlevel == nlevels - 1) { 3442 /* No cached indirect blocks found. */ 3443 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 3444 bp = dn->dn_phys->dn_blkptr[curblkid]; 3445 } 3446 ASSERT(!BP_IS_REDACTED(&bp) || 3447 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset, 3448 SPA_FEATURE_REDACTED_DATASETS)); 3449 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp)) 3450 goto no_issue; 3451 3452 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 3453 3454 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 3455 ZIO_FLAG_CANFAIL); 3456 3457 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 3458 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 3459 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? 
ds->ds_object : DMU_META_OBJSET,
3460 dn->dn_object, level, blkid);
3461 dpa->dpa_curlevel = curlevel;
3462 dpa->dpa_prio = prio;
3463 dpa->dpa_aflags = aflags;
3464 dpa->dpa_spa = dn->dn_objset->os_spa;
3465 dpa->dpa_dnode = dn;
3466 dpa->dpa_epbs = epbs;
3467 dpa->dpa_zio = pio;
3468 dpa->dpa_cb = cb;
3469 dpa->dpa_arg = arg;
3470
3471 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3472 dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3473 else if (dnode_level_is_l2cacheable(&bp, dn, level))
3474 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3475
3476 /*
3477 * If we have the indirect just above us, no need to do the asynchronous
3478 * prefetch chain; we'll just run the last step ourselves. If we're at
3479 * a higher level, though, we want to issue the prefetches for all the
3480 * indirect blocks asynchronously, so we can go on with whatever we were
3481 * doing.
3482 */
3483 if (curlevel == level) {
3484 ASSERT3U(curblkid, ==, blkid);
3485 dbuf_issue_final_prefetch(dpa, &bp);
3486 } else {
3487 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3488 zbookmark_phys_t zb;
3489
3490 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3491 if (dnode_level_is_l2cacheable(&bp, dn, level))
3492 iter_aflags |= ARC_FLAG_L2CACHE;
3493
3494 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3495 dn->dn_object, curlevel, curblkid);
3496 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3497 &bp, dbuf_prefetch_indirect_done, dpa,
3498 ZIO_PRIORITY_SYNC_READ,
3499 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3500 &iter_aflags, &zb);
3501 }
3502 /*
3503 * We use pio here instead of dpa_zio since it's possible that
3504 * dpa may have already been freed.
3505 */
3506 zio_nowait(pio);
3507 return (1);
3508 no_issue:
3509 if (cb != NULL)
3510 cb(arg, level, blkid, B_FALSE);
3511 return (0);
3512 }
3513
3514 int
3515 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3516 arc_flags_t aflags)
3517 {
3518
3519 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3520 }
3521
3522 /*
3523 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3524 * the case of encrypted, compressed and uncompressed buffers by
3525 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3526 * arc_alloc_compressed_buf() or arc_alloc_buf().
3527 *
3528 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().

/*
 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
 * the case of encrypted, compressed and uncompressed buffers by
 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
 * arc_alloc_compressed_buf() or arc_alloc_buf().
 *
 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
 */
noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
	dbuf_dirty_record_t *dr = db->db_data_pending;
	arc_buf_t *data = dr->dt.dl.dr_data;
	enum zio_compress compress_type = arc_get_compression(data);
	uint8_t complevel = arc_get_complevel(data);

	if (arc_is_encrypted(data)) {
		boolean_t byteorder;
		uint8_t salt[ZIO_DATA_SALT_LEN];
		uint8_t iv[ZIO_DATA_IV_LEN];
		uint8_t mac[ZIO_DATA_MAC_LEN];

		arc_get_raw_params(data, &byteorder, salt, iv, mac);
		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
		    compress_type, complevel));
	} else if (compress_type != ZIO_COMPRESS_OFF) {
		dbuf_set_data(db, arc_alloc_compressed_buf(
		    dn->dn_objset->os_spa, db, arc_buf_size(data),
		    arc_buf_lsize(data), compress_type, complevel));
	} else {
		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
	}

	rw_enter(&db->db_rwlock, RW_WRITER);
	memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
	rw_exit(&db->db_rwlock);
}

/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
    boolean_t fail_sparse, boolean_t fail_uncached,
    const void *tag, dmu_buf_impl_t **dbp)
{
	dmu_buf_impl_t *db, *parent = NULL;
	uint64_t hv;

	/* If the pool has been created, verify the tx_sync_lock is not held */
	spa_t *spa = dn->dn_objset->os_spa;
	dsl_pool_t *dp = spa->spa_dsl_pool;
	if (dp != NULL) {
		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
	}

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT3U(dn->dn_nlevels, >, level);

	*dbp = NULL;

	/* dbuf_find() returns with db_mtx held */
	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);

	if (db == NULL) {
		blkptr_t *bp = NULL;
		int err;

		if (fail_uncached)
			return (SET_ERROR(ENOENT));

		ASSERT3P(parent, ==, NULL);
		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
		if (fail_sparse) {
			if (err == 0 && bp && BP_IS_HOLE(bp))
				err = SET_ERROR(ENOENT);
			if (err) {
				if (parent)
					dbuf_rele(parent, NULL);
				return (err);
			}
		}
		if (err && err != ENOENT)
			return (err);
		db = dbuf_create(dn, level, blkid, parent, bp, hv);
	}

	if (fail_uncached && db->db_state != DB_CACHED) {
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	if (db->db_buf != NULL) {
		arc_buf_access(db->db_buf);
		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
	}

	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    db->db_state == DB_CACHED && db->db_data_pending) {
		dbuf_dirty_record_t *dr = db->db_data_pending;
		if (dr->dt.dl.dr_data == db->db_buf)
			dbuf_hold_copy(dn, db);
	}

	if (multilist_link_active(&db->db_cache_link)) {
		ASSERT(zfs_refcount_is_zero(&db->db_holds));
		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
		    db->db_caching_status == DB_DBUF_METADATA_CACHE);

		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[db->db_caching_status].size,
		    db->db.db_size, db);

		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
			DBUF_STAT_BUMPDOWN(metadata_cache_count);
		} else {
			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
			DBUF_STAT_BUMPDOWN(cache_count);
			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
			    db->db.db_size);
		}
		db->db_caching_status = DB_NO_CACHE;
	}
	(void) zfs_refcount_add(&db->db_holds, tag);
	DBUF_VERIFY(db);
	mutex_exit(&db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (parent)
		dbuf_rele(parent, NULL);

	ASSERT3P(DB_DNODE(db), ==, dn);
	ASSERT3U(db->db_blkid, ==, blkid);
	ASSERT3U(db->db_level, ==, level);
	*dbp = db;

	return (0);
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
{
	return (dbuf_hold_level(dn, 0, blkid, tag));
}

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
	return (err ? NULL : db);
}
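
/*
 * Illustrative sketch of the hold/release discipline (not part of the
 * original code): every successful dbuf_hold*() pins the dbuf and must be
 * balanced by a dbuf_rele() with the same tag, and dn_struct_rwlock must
 * be held across the hold itself.
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */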

void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
	    dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (SET_ERROR(ENOTSUP));
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	dbuf_new_size(db, blksz, tx);

	return (0);
}

void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
{
	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
	VERIFY3S(holds, >, 1);
}

#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
    const void *tag)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dmu_buf_impl_t *found_db;
	boolean_t result = B_FALSE;

	if (blkid == DMU_BONUS_BLKID)
		found_db = dbuf_find_bonus(os, obj);
	else
		found_db = dbuf_find(os, obj, 0, blkid, NULL);

	if (found_db != NULL) {
		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
			(void) zfs_refcount_add(&db->db_holds, tag);
			result = B_TRUE;
		}
		mutex_exit(&found_db->db_mtx);
	}
	return (result);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode. (An indirect
 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
 * dnode's parent dbuf evicting its dnode handles.
 */
void
dbuf_rele(dmu_buf_impl_t *db, const void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag, B_FALSE);
}

void
dmu_buf_rele(dmu_buf_t *db, const void *tag)
{
	dbuf_rele((dmu_buf_impl_t *)db, tag);
}
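
/*
 * Illustrative sketch (not part of the original code): a consumer that
 * caches a dbuf pointer without holding it can use dbuf_try_add_ref()
 * (above) to attempt to upgrade that weak reference to a real hold. The
 * call verifies that the remembered dbuf is still the one in the hash
 * table and that it has holds beyond its dirty records:
 *
 *	if (dbuf_try_add_ref(db, os, object, blkid, FTAG)) {
 *		... db is now held and safe to use ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */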

/*
 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
 * argument should be set if we are already in the dbuf-evicting code
 * path, in which case we don't want to recursively evict. This allows us to
 * avoid deeply nested stacks that would have a call flow similar to this:
 *
 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
 *	^						|
 *	|						|
 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
 *
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
{
	int64_t holds;
	uint64_t size;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = zfs_refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf != NULL &&
	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
		arc_buf_freeze(db->db_buf);
	}

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_user_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			dnode_t *dn;
			boolean_t evict_dbuf = db->db_pending_evict;

			/*
			 * If the dnode moves here, we cannot cross this
			 * barrier until the move completes.
			 */
			DB_DNODE_ENTER(db);

			dn = DB_DNODE(db);
			atomic_dec_32(&dn->dn_dbufs_count);

			/*
			 * Decrementing the dbuf count means that the bonus
			 * buffer's dnode hold is no longer discounted in
			 * dnode_move(). The dnode cannot move until after
			 * the dnode_rele() below.
			 */
			DB_DNODE_EXIT(db);

			/*
			 * Do not reference db after its lock is dropped.
			 * Another thread may evict it.
			 */
			mutex_exit(&db->db_mtx);

			if (evict_dbuf)
				dnode_evict_bonus(dn);

			dnode_rele(dn, db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_destroy(db);
		} else if (arc_released(db->db_buf)) {
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_destroy(db);
		} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
		    db->db_pending_evict) {
			dbuf_destroy(db);
		} else if (!multilist_link_active(&db->db_cache_link)) {
			ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);

			dbuf_cached_state_t dcs =
			    dbuf_include_in_metadata_cache(db) ?
			    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
			db->db_caching_status = dcs;

			multilist_insert(&dbuf_caches[dcs].cache, db);
			uint64_t db_size = db->db.db_size;
			size = zfs_refcount_add_many(
			    &dbuf_caches[dcs].size, db_size, db);
			uint8_t db_level = db->db_level;
			mutex_exit(&db->db_mtx);

			if (dcs == DB_DBUF_METADATA_CACHE) {
				DBUF_STAT_BUMP(metadata_cache_count);
				DBUF_STAT_MAX(metadata_cache_size_bytes_max,
				    size);
			} else {
				DBUF_STAT_BUMP(cache_count);
				DBUF_STAT_MAX(cache_size_bytes_max, size);
				DBUF_STAT_BUMP(cache_levels[db_level]);
				DBUF_STAT_INCR(cache_levels_bytes[db_level],
				    db_size);
			}

			if (dcs == DB_DBUF_CACHE && !evicting)
				dbuf_evict_notify(size);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}

#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (zfs_refcount_count(&db->db_holds));
}

uint64_t
dmu_buf_user_refcount(dmu_buf_t *db_fake)
{
	uint64_t holds;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	mutex_enter(&db->db_mtx);
	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
	mutex_exit(&db->db_mtx);

	return (holds);
}

void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
    dmu_buf_user_t *new_user)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	mutex_enter(&db->db_mtx);
	dbuf_verify_user(db, DBVU_NOT_EVICTING);
	if (db->db_user == old_user)
		db->db_user = new_user;
	else
		old_user = db->db_user;
	dbuf_verify_user(db, DBVU_NOT_EVICTING);
	mutex_exit(&db->db_mtx);

	return (old_user);
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
	return (dmu_buf_replace_user(db_fake, NULL, user));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_user_immediate_evict = TRUE;
	return (dmu_buf_set_user(db_fake, user));
}

void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
	return (dmu_buf_replace_user(db_fake, user, NULL));
}

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	dbuf_verify_user(db, DBVU_NOT_EVICTING);
	return (db->db_user);
}
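
/*
 * Illustrative sketch of the user API above (not part of the original
 * code; my_user_t and the evict callbacks are hypothetical names). A
 * consumer embeds a dmu_buf_user_t in its own state, initializes the
 * eviction callbacks with dmu_buf_init_user(), and attaches it to a held
 * buffer. dmu_buf_set_user() returns NULL on success, or the
 * already-attached user if it lost the race:
 *
 *	typedef struct my_user {
 *		dmu_buf_user_t mu_dbu;
 *		dmu_buf_t *mu_db;
 *	} my_user_t;
 *
 *	dmu_buf_init_user(&mu->mu_dbu, my_evict_sync, my_evict_async,
 *	    &mu->mu_db);
 *	winner = dmu_buf_set_user(db, &mu->mu_dbu);
 *	if (winner != NULL) {
 *		... another thread attached first; use winner, free mu ...
 *	}
 */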

void
dmu_buf_user_evict_wait(void)
{
	taskq_wait(dbu_evict_taskq);
}

blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_blkptr);
}

objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_objset);
}

dnode_t *
dmu_buf_dnode_enter(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	DB_DNODE_ENTER(dbi);
	return (DB_DNODE(dbi));
}

void
dmu_buf_dnode_exit(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	DB_DNODE_EXIT(dbi);
}

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no available blkptrs from the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			parent = dbuf_hold_level(dn, db->db_level + 1,
			    db->db_blkid >> epbs, db);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}
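
/*
 * A worked example of the epbs arithmetic used above: with the default
 * 128K indirect block size (dn_indblkshift == 17) and 128-byte block
 * pointers (SPA_BLKPTRSHIFT == 7), epbs == 10, so each indirect block
 * maps 1024 children. The level-1 parent of level-0 blkid 5000 is then
 * blkid 5000 >> 10 == 4, and within that parent the child's blkptr sits
 * at index 5000 & ((1 << 10) - 1) == 904.
 */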

static void
dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	void *data = dr->dt.dl.dr_data;

	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
	ASSERT(data != NULL);

	dnode_t *dn = dr->dr_dnode;
	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
	memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));

	dbuf_sync_leaf_verify_bonus_dnode(dr);

	dbuf_undirty_bonus(dr);
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}

/*
 * When syncing out a block of dnodes, adjust the block to deal with
 * encryption. Normally, we make sure the block is decrypted before writing
 * it. If we have crypt params, then we are writing a raw (encrypted) block,
 * from a raw receive. In this case, set the ARC buf's crypt params so
 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
 */
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
	int err;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
	ASSERT3U(db->db_level, ==, 0);

	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
		zbookmark_phys_t zb;

		/*
		 * Unfortunately, there is currently no mechanism for
		 * syncing context to handle decryption errors. An error
		 * here is only possible if an attacker maliciously
		 * changed a dnode block and updated the associated
		 * checksums going up the block tree.
		 */
		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
		    db->db.db_object, db->db_level, db->db_blkid);
		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
		    &zb, B_TRUE);
		if (err)
			panic("Invalid dnode block MAC");
	} else if (dr->dt.dl.dr_has_raw_params) {
		(void) arc_release(dr->dt.dl.dr_data, db);
		arc_convert_to_raw(dr->dt.dl.dr_data,
		    dmu_objset_id(db->db_objset),
		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
	}
}

/*
 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
 * is critical that we not allow the compiler to inline this function into
 * dbuf_sync_list() thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = dr->dr_dnode;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	/* Indirect block size must match what the dnode thinks it is. */
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);

	/* Provide the pending dirty record to child dbufs */
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, db->db_buf, tx);

	zio_t *zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}

/*
 * Verify that the size of the data in our bonus buffer does not exceed
 * its recorded size.
 *
 * The purpose of this verification is to catch any cases in development
 * where the size of a phys structure (e.g. space_map_phys_t) grows and,
 * due to incorrect feature management, older pools expect to read more
 * data even though they didn't actually write it to begin with.
 *
 * For example, this would catch an error in the feature logic where we
 * open an older pool and we expect to write the space map histogram of
 * a space map with size SPACE_MAP_SIZE_V0.
 */
static void
dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
{
#ifdef ZFS_DEBUG
	dnode_t *dn = dr->dr_dnode;

	/*
	 * Encrypted bonus buffers can have data past their bonuslen.
	 * Skip the verification of these blocks.
	 */
	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
		return;

	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
	ASSERT3U(bonuslen, <=, maxbonuslen);

	arc_buf_t *datap = dr->dt.dl.dr_data;
	char *datap_end = ((char *)datap) + bonuslen;
	char *datap_max = ((char *)datap) + maxbonuslen;

	/* ensure that everything is zero after our data */
	for (; datap_end < datap_max; datap_end++)
		ASSERT(*datap_end == 0);
#endif
}

static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
	/* This must be a lightweight dirty record. */
	ASSERT3P(dr->dr_dbuf, ==, NULL);
	dnode_t *dn = dr->dr_dnode;

	if (dn->dn_phys->dn_nlevels == 1) {
		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
	} else {
		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
		VERIFY3U(parent_db->db_level, ==, 1);
		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
		blkptr_t *bp = parent_db->db.db_data;
		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
	}
}

static void
dbuf_lightweight_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	if (zio->io_error != 0)
		return;

	dnode_t *dn = dr->dr_dnode;

	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	int64_t delta = bp_get_dsize_sync(spa, bp) -
	    bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta);

	uint64_t blkid = dr->dt.dll.dr_blkid;
	mutex_enter(&dn->dn_mtx);
	if (blkid > dn->dn_phys->dn_maxblkid) {
		ASSERT0(dn->dn_objset->os_raw_receive);
		dn->dn_phys->dn_maxblkid = blkid;
	}
	mutex_exit(&dn->dn_mtx);

	if (!BP_IS_EMBEDDED(bp)) {
		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
		BP_SET_FILL(bp, fill);
	}

	dmu_buf_impl_t *parent_db;
	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
	if (dr->dr_parent == NULL) {
		parent_db = dn->dn_dbuf;
	} else {
		parent_db = dr->dr_parent->dr_dbuf;
	}
	rw_enter(&parent_db->db_rwlock, RW_WRITER);
	*bp_orig = *bp;
	rw_exit(&parent_db->db_rwlock);
}

static void
dbuf_lightweight_physdone(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
	ASSERT3U(dr->dr_txg, ==, zio->io_txg);

	/*
	 * The callback will be called io_phys_children times. Retire one
	 * portion of our dirty space each time we are called. Any rounding
	 * error will be cleaned up by dbuf_lightweight_done().
	 */
	int delta = dr->dr_accounted / zio->io_phys_children;
	dsl_pool_undirty_space(dp, delta, zio->io_txg);
}

static void
dbuf_lightweight_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;

	VERIFY0(zio->io_error);

	objset_t *os = dr->dr_dnode->dn_objset;
	dmu_tx_t *tx = os->os_synctx;

	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, zio->io_bp, tx);
	}

	/*
	 * See comment in dbuf_write_done().
	 */
	if (zio->io_phys_children == 0) {
		dsl_pool_undirty_space(dmu_objset_pool(os),
		    dr->dr_accounted, zio->io_txg);
	} else {
		dsl_pool_undirty_space(dmu_objset_pool(os),
		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
	}

	abd_free(dr->dt.dll.dr_abd);
	kmem_free(dr, sizeof (*dr));
}
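
/*
 * Worked example of the dirty-space accounting split between
 * dbuf_lightweight_physdone() and dbuf_lightweight_done(): if
 * dr_accounted == 131073 bytes and the zio has 2 physical children
 * (e.g. two copies of the block), each physdone call retires
 * 131073 / 2 == 65536 bytes and done() retires the remainder,
 * 131073 % 2 == 1 byte, so exactly dr_accounted is undirtied in total.
 * If there were no physical children at all, done() retires the full
 * amount.
 */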

noinline static void
dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dnode_t *dn = dr->dr_dnode;
	zio_t *pio;
	if (dn->dn_phys->dn_nlevels == 1) {
		pio = dn->dn_zio;
	} else {
		pio = dr->dr_parent->dr_zio;
	}

	zbookmark_phys_t zb = {
		.zb_objset = dmu_objset_id(dn->dn_objset),
		.zb_object = dn->dn_object,
		.zb_level = 0,
		.zb_blkid = dr->dt.dll.dr_blkid,
	};

	/*
	 * See comment in dbuf_write(). This is so that zio->io_bp_orig
	 * will have the old BP in dbuf_lightweight_done().
	 */
	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);

	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
	    dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
	    ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);

	zio_nowait(dr->dr_zio);
}

/*
 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
 * critical that we not allow the compiler to inline this function into
 * dbuf_sync_list() thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = dr->dr_dnode;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied. But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
			/*
			 * In the previous transaction group, the bonus buffer
			 * was entirely used to store the attributes for the
			 * dnode which overrode the dn_spill field. However,
			 * when adding more attributes to the file a spill
			 * block was required to hold the extra attributes.
			 *
			 * Make sure to clear the garbage left in the dn_spill
			 * field from the previous attributes in the bonus
			 * buffer. Otherwise, after writing out the spill
			 * block to the newly allocated DVA, it will free
			 * the old block pointed to by the invalid dn_spill.
			 */
			db->db_blkptr = NULL;
		}
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode. It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dr->dr_dbuf == db);
		dbuf_sync_bonus(dr, tx);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in. As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	/*
	 * If this is a dnode block, ensure it is appropriately encrypted
	 * or decrypted, depending on what we are writing to it this txg.
	 */
	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
		dbuf_prepare_encrypted_dnode_leaf(dr);

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    zfs_refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DNODE blocks).
		 */
		int psize = arc_buf_size(*datap);
		int lsize = arc_buf_lsize(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		enum zio_compress compress_type = arc_get_compression(*datap);
		uint8_t complevel = arc_get_complevel(*datap);

		if (arc_is_encrypted(*datap)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
			*datap = arc_alloc_raw_buf(os->os_spa, db,
			    dmu_objset_id(os), byteorder, salt, iv, mac,
			    dn->dn_type, psize, lsize, compress_type,
			    complevel);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			*datap = arc_alloc_compressed_buf(os->os_spa, db,
			    psize, lsize, compress_type, complevel);
		} else {
			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
		}
		memcpy((*datap)->b_data, db->db.db_data, psize);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
	} else {
		zio_nowait(dr->dr_zio);
	}
}

void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf == NULL) {
			dbuf_sync_lightweight(dr, tx);
		} else {
			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
			}
			if (dr->dr_dbuf->db_level > 0)
				dbuf_sync_indirect(dr, tx);
			else
				dbuf_sync_leaf(dr, tx);
		}
	}
}

static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	(void) buf;
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT3P(db->db_blkptr, !=, NULL);
	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (bp->blk_birth != 0) {
		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_type) ||
		    (db->db_blkid == DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
		    BP_IS_EMBEDDED(bp));
		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
	}

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(bp)) &&
		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID) {
			ASSERT0(db->db_objset->os_raw_receive);
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		}
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			i = 0;
			while (i < db->db.db_size) {
				dnode_phys_t *dnp =
				    (void *)(((char *)db->db.db_data) + i);

				i += DNODE_MIN_SIZE;
				if (dnp->dn_type != DMU_OT_NONE) {
					fill++;
					i += dnp->dn_extra_slots *
					    DNODE_MIN_SIZE;
				}
			}
		} else {
			if (BP_IS_HOLE(bp)) {
				fill = 0;
			} else {
				fill = 1;
			}
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += BP_GET_FILL(ibp);
		}
	}
	DB_DNODE_EXIT(db);

	if (!BP_IS_EMBEDDED(bp))
		BP_SET_FILL(bp, fill);

	mutex_exit(&db->db_mtx);

	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
	*db->db_blkptr = *bp;
	dmu_buf_unlock_parent(db, dblt, FTAG);
}
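
/*
 * Worked example of the fill count computed above: a level-0 data block
 * contributes fill 0 (hole) or 1; a level-0 block of dnodes contributes
 * the number of allocated dnodes it holds, counting a multi-slot dnode
 * once; and an indirect block whose 1024 block pointers include 3 holes
 * contributes the sum of the fill counts of the remaining 1021 non-hole
 * children.
 */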

/*
 * This function gets called just prior to running through the compression
 * stage of the zio pipeline. If we're an indirect block composed of only
 * holes, then we want this indirect to be compressed away to a hole. In
 * order to do that we must zero out any information about the holes that
 * this indirect points to before we try to compress it.
 */
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	(void) zio, (void) buf;
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp;
	unsigned int epbs, i;

	ASSERT3U(db->db_level, >, 0);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);

	/* Determine if all our children are holes */
	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}

	/*
	 * If all the children are holes, then zero them all out so that
	 * we may get compressed away.
	 */
	if (i == 1ULL << epbs) {
		/*
		 * We only found holes. Grab the rwlock to prevent
		 * anybody from reading the blocks we're about to
		 * zero out.
		 */
		rw_enter(&db->db_rwlock, RW_WRITER);
		memset(db->db.db_data, 0, db->db.db_size);
		rw_exit(&db->db_rwlock);
	}
	DB_DNODE_EXIT(db);
}

/*
 * The SPA will call this callback several times for each zio - once
 * for every physical child i/o (zio->io_phys_children times). This
 * allows the DMU to monitor the progress of each logical i/o. For example,
 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
 * block. There may be a long delay before all copies/fragments are completed,
 * so this callback allows us to retire dirty space gradually, as the physical
 * i/os complete.
 */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
	(void) buf;
	dmu_buf_impl_t *db = arg;
	objset_t *os = db->db_objset;
	dsl_pool_t *dp = dmu_objset_pool(os);
	dbuf_dirty_record_t *dr;
	int delta = 0;

	dr = db->db_data_pending;
	ASSERT3U(dr->dr_txg, ==, zio->io_txg);

	/*
	 * The callback will be called io_phys_children times. Retire one
	 * portion of our dirty space each time we are called. Any rounding
	 * error will be cleaned up by dbuf_write_done().
	 */
	delta = dr->dr_accounted / zio->io_phys_children;
	dsl_pool_undirty_space(dp, delta, zio->io_txg);
}

static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	(void) buf;
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	blkptr_t *bp = db->db_blkptr;
	objset_t *os = db->db_objset;
	dmu_tx_t *tx = os->os_synctx;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	dbuf_dirty_record_t *dr = db->db_data_pending;
	dnode_t *dn = dr->dr_dnode;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_dbuf == db);
	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
	list_remove(&db->db_dirty_records, dr);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				arc_buf_destroy(dr->dt.dl.dr_data, db);
		}
	} else {
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_blkid, <=,
			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
		}
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);

	/*
	 * If we didn't do a physical write in this ZIO and we
	 * still ended up here, it means that the space of the
	 * dbuf that we just released (and undirtied) above hasn't
	 * been marked as undirtied in the pool's accounting.
	 *
	 * Thus, we undirty that space in the pool's view of the
	 * world here. For physical writes this type of update
	 * happens in dbuf_write_physdone().
	 *
	 * If we did a physical write, cleanup any rounding errors
	 * that came up due to writing multiple copies of a block
	 * on disk [see dbuf_write_physdone()].
	 */
	if (zio->io_phys_children == 0) {
		dsl_pool_undirty_space(dmu_objset_pool(os),
		    dr->dr_accounted, zio->io_txg);
	} else {
		dsl_pool_undirty_space(dmu_objset_pool(os),
		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
	}

	kmem_free(dr, sizeof (dbuf_dirty_record_t));
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);

	if (zio->io_abd != NULL)
		abd_free(zio->io_abd);
}

typedef struct dbuf_remap_impl_callback_arg {
	objset_t	*drica_os;
	uint64_t	drica_blk_birth;
	dmu_tx_t	*drica_tx;
} dbuf_remap_impl_callback_arg_t;

static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
    void *arg)
{
	dbuf_remap_impl_callback_arg_t *drica = arg;
	objset_t *os = drica->drica_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_tx_t *tx = drica->drica_tx;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (os == spa_meta_objset(spa)) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
		    size, drica->drica_blk_birth, tx);
	}
}

static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
	blkptr_t bp_copy = *bp;
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	dbuf_remap_impl_callback_arg_t drica;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	drica.drica_os = dn->dn_objset;
	drica.drica_blk_birth = bp->blk_birth;
	drica.drica_tx = tx;
	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
	    &drica)) {
		/*
		 * If the blkptr being remapped is tracked by a livelist,
		 * then we need to make sure the livelist reflects the update.
		 * First, cancel out the old blkptr by appending a 'FREE'
		 * entry. Next, add an 'ALLOC' to track the new version. This
		 * way we avoid trying to free an inaccurate blkptr at delete.
		 * Note that embedded blkptrs are not tracked in livelists.
		 */
		if (dn->dn_objset != spa_meta_objset(spa)) {
			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
			    bp->blk_birth > ds->ds_dir->dd_origin_txg) {
				ASSERT(!BP_IS_EMBEDDED(bp));
				ASSERT(dsl_dir_is_clone(ds->ds_dir));
				ASSERT(spa_feature_is_enabled(spa,
				    SPA_FEATURE_LIVELIST));
				bplist_append(&ds->ds_dir->dd_pending_frees,
				    bp);
				bplist_append(&ds->ds_dir->dd_pending_allocs,
				    &bp_copy);
			}
		}

		/*
		 * The db_rwlock prevents dbuf_read_impl() from
		 * dereferencing the BP while we are changing it. To
		 * avoid lock contention, only grab it when we are actually
		 * changing the BP.
		 */
		if (rw != NULL)
			rw_enter(rw, RW_WRITER);
		*bp = bp_copy;
		if (rw != NULL)
			rw_exit(rw);
	}
}

/*
 * Remap any existing BP's to concrete vdevs, if possible.
 */
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return;

	if (db->db_level > 0) {
		blkptr_t *bp = db->db.db_data;
		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
		}
	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		dnode_phys_t *dnp = db->db.db_data;
		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
		    DMU_OT_DNODE);
		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
		    i += dnp[i].dn_extra_slots + 1) {
			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
				    &dn->dn_dbuf->db_rwlock);
				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
				    tx);
			}
		}
	}
}

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = dr->dr_dnode;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *pio; /* parent I/O */
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
			dbuf_remap(dn, db, tx);
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		pio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		pio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(pio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context. We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
		abd_t *contents = (data != NULL) ?
		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;

		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
		    contents, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(pio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to setup the children
		 * ready callback so that we can properly handle an indirect
		 * block that only contains holes.
		 */
		arc_write_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(pio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
		    dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
		    children_ready_cb, dbuf_write_physdone,
		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
	"Maximum size in bytes of the dbuf cache.");

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
	"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
	"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
	"Maximum size in bytes of dbuf metadata cache.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
	"Set size of dbuf cache to log2 fraction of arc size.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
	"Set size of dbuf metadata cache to log2 fraction of arc size.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
	"Set size of dbuf cache mutex array as log2 shift.");
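
/*
 * Example (illustrative, Linux): the parameters above appear under the
 * module parameter interface, and the ZMOD_RW ones may be inspected and
 * changed at runtime, e.g.:
 *
 *	cat /sys/module/zfs/parameters/dbuf_cache_max_bytes
 *	echo 268435456 > /sys/module/zfs/parameters/dbuf_cache_max_bytes
 */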