/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>

static kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Number of entries in the hash table dbuf and mutex arrays.
	 */
	kstat_named_t hash_table_count;
	kstat_named_t hash_mutex_count;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
	{ "hash_hits",				KSTAT_DATA_UINT64 },
	{ "hash_misses",			KSTAT_DATA_UINT64 },
	{ "hash_collisions",			KSTAT_DATA_UINT64 },
	{ "hash_elements",			KSTAT_DATA_UINT64 },
	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
	{ "hash_chains",			KSTAT_DATA_UINT64 },
	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
	{ "hash_table_count",			KSTAT_DATA_UINT64 },
	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {	\
	uint64_t _m;			\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&	\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;		\
}
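
/*
 * Note on DBUF_STAT_MAX: it tracks a peak value without taking a lock.
 * The loop re-reads the current maximum and retries the compare-and-swap
 * until either the stored maximum is already >= v or the swap succeeds,
 * so concurrent updaters can never move the maximum backwards.
 */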
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;

/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                      stop        signal     evict
 *                                      evicting   eviction   directly
 *                                      thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;
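
/*
 * Worked example of the defaults: with a target (mid water) of, say, 100MB,
 * dbuf_cache_lowater_pct = 10 puts the low water mark at 90MB and
 * dbuf_cache_hiwater_pct = 10 puts the high water mark at 110MB; see
 * dbuf_cache_hiwater_bytes() and dbuf_cache_lowater_bytes() below.
 */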

static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	dmu_buf_impl_t *db = vdb;
	memset(db, 0, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

static void
dbuf_dest(void *vdb, void *unused)
{
	(void) unused;
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
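
/*
 * A note on how the hash value is consumed below: the bucket index is
 * (hash & hash_table_mask), and DBUF_HASH_MUTEX() (see dbuf.h) maps a
 * bucket index onto the smaller hash_mutexes array, so a group of
 * buckets shares each lock.
 */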

#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
    uint64_t *hash_out)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	if (hash_out != NULL)
		*hash_out = hv;
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}
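
/*
 * Typical caller pattern (sketch): dbuf_create() allocates a new dbuf and
 * then calls dbuf_hash_insert(); a non-NULL return value means another
 * thread won the race and inserted an equal dbuf first, so the caller
 * uses the existing dbuf instead. The hash_insert_race kstat above
 * counts how often this happens.
 */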

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t idx;
	dmu_buf_impl_t *dbf, **dbp;

	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
	    db->db_blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	if (db->db_caching_status != DB_NO_CACHE) {
		/*
		 * This is a cached dbuf, so the size of the user data is
		 * included in its cached amount. We adjust it here because the
		 * user data has already been detached from the dbuf, and the
		 * sync functions are not supposed to touch it (the dbuf might
		 * not exist anymore by the time the sync functions run).
		 */
		uint64_t size = dbu->dbu_size;
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[db->db_caching_status].size, size, db);
		if (db->db_caching_status == DB_DBUF_CACHE)
			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
	}

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq.  The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu.  In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}
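
/*
 * Consumer-side sketch (see dmu.h for the authoritative API): a client
 * embeds a dmu_buf_user_t in its own state structure, initializes it
 * with dmu_buf_init_user(), supplying the sync and/or async eviction
 * callbacks invoked above, and attaches it to a held dbuf with
 * dmu_buf_set_user(). The callbacks then tear down the client state
 * when the dbuf is evicted.
 */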

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be metadata.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (db->db_objset->os_secondary_cache ==
	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		blkptr_t *bp = db->db_blkptr;
		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
	    (level > 0 ||
	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}
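
/*
 * Example with the default shifts: an ARC target of 4GB gives a dbuf
 * cache target of 4GB >> 5 = 128MB, and a metadata cache target of
 * 4GB >> 6 = 64MB (see dbuf_metadata_cache_target_bytes() below),
 * unless the corresponding *_max_bytes tunables cap them first.
 */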

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
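
	/*
	 * Walk backward from the tail (the least recently used end) of
	 * the sublist, skipping any dbuf whose mutex is already held
	 * rather than blocking on it.
	 */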
	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hmsize, hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 64K (128KB per GB with 8-byte pointers).
	 */
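	/*
	 * For example, on a 32GB system the loop below stops at
	 * hsize = 2^19 buckets (2^19 * 8K = 4GB = 32GB / 8), so the
	 * bucket array itself occupies 2^19 * 8 bytes = 4MB.
	 */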
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets. A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
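
	/*
	 * Continuing the 32GB example: hsize >> 7 = 2^19 / 128 = 4096,
	 * which is below the 8192 floor, so 8192 mutexes are used and
	 * each one ends up guarding 64 buckets instead of 128.
	 */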
	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;

	dbuf_stats_destroy();

	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
		mutex_destroy(&h->hash_mutexes[i]);

	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
	    sizeof (kmutex_t));

	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		memcpy(abuf->b_data, db->db.db_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
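
/*
 * Illustrative example: with 128K data blocks (datablkshift = 17) and
 * 128K indirect blocks (indblkshift = 17, so each indirect holds
 * 2^(17 - 7) = 1024 block pointers given SPA_BLKPTRSHIFT = 7), the
 * level 1 block covering byte offset 900000000 is
 * 900000000 >> (17 + 1 * 10) = 6.
 */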

/*
 * This function is used to lock the parent of the provided dbuf. This should be
 * used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
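
/*
 * Usage sketch, mirroring how dbuf_read() drives this pair below; the
 * returned lock type must be handed back to dmu_buf_unlock_parent():
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	... read or modify db->db_blkptr ...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */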

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	(void) zb, (void) bp;
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "i/o error");
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* freed in flight */
		ASSERT(zio == NULL || zio->io_error == 0);
		arc_release(buf, db);
		memset(buf->b_data, 0, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "freed in flight");
	} else {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "successful read");
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}

/*
 * Shortcut for performing reads on bonus dbufs.  Returns
 * an error if we fail to verify the dnode associated with
 * a decrypted block. Otherwise success.
 */
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	int bonuslen, max_bonuslen, err;

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err)
		return (err);

	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(DB_DNODE_HELD(db));
	ASSERT3U(bonuslen, <=, db->db.db_size);
	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
	if (bonuslen < max_bonuslen)
		memset(db->db.db_data, 0, max_bonuslen);
	if (bonuslen)
		memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
	db->db_state = DB_CACHED;
	DTRACE_SET_STATE(db, "bonus buffer filled");
	return (0);
}

static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
{
	blkptr_t *bps = db->db.db_data;
	uint32_t indbs = 1ULL << dn->dn_indblkshift;
	int n_bps = indbs >> SPA_BLKPTRSHIFT;

	for (int i = 0; i < n_bps; i++) {
		blkptr_t *bp = &bps[i];

		ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
		BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
		    dn->dn_datablksz : BP_GET_LSIZE(dbbp));
		BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
		BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
		BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
	}
}

/*
 * Handle reads on dbufs that are holes, if necessary.  This function
 * requires that the dbuf's mutex is held. Returns success (0) if action
 * was taken, ENOENT if no action was taken.
 */
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	int is_hole = bp == NULL || BP_IS_HOLE(bp);
	/*
	 * For level 0 blocks only, if the above check fails:
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (!is_hole && db->db_level == 0)
		is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);

	if (is_hole) {
		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
		memset(db->db.db_data, 0, db->db.db_size);

		if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
		    bp->blk_birth != 0) {
			dbuf_handle_indirect_hole(db, dn, bp);
		}
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "hole read satisfied");
		return (0);
	}
	return (ENOENT);
}

/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption.
 * This is especially important considering that we always read encrypted
 * dnode blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if ((flags & DB_RF_NO_DECRYPT) != 0 ||
	    !os->os_encrypted || os->os_raw_receive)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Drops db_mtx and the parent lock specified by dblt and tag before
 * returning.
 */
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
    db_lock_type_t dblt, const void *tag)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags;
	blkptr_t bp, *bpp;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_parent == NULL ||
	    RW_LOCK_HELD(&db->db_parent->db_rwlock));

	if (db->db_blkid == DMU_BONUS_BLKID) {
		err = dbuf_read_bonus(db, dn, flags);
		goto early_unlock;
	}

	if (db->db_state == DB_UNCACHED) {
		if (db->db_blkptr == NULL) {
			bpp = NULL;
		} else {
			bp = *db->db_blkptr;
			bpp = &bp;
		}
	} else {
		dbuf_dirty_record_t *dr;

		ASSERT3S(db->db_state, ==, DB_NOFILL);

		/*
		 * Block cloning: If we have a pending block clone,
		 * we don't want to read the underlying block, but the content
		 * of the block being cloned, so we have the most recent data.
		 */
		dr = list_head(&db->db_dirty_records);
		if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
			err = EIO;
			goto early_unlock;
		}
		bp = dr->dt.dl.dr_overridden_by;
		bpp = &bp;
	}

	err = dbuf_read_hole(db, dn, bpp);
	if (err == 0)
		goto early_unlock;

	ASSERT(bpp != NULL);

	/*
	 * Any attempt to read a redacted block should result in an error. This
	 * will never happen under normal conditions, but can be useful for
	 * debugging purposes.
	 */
	if (BP_IS_REDACTED(bpp)) {
		ASSERT(dsl_dataset_feature_is_active(
		    db->db_objset->os_dsl_dataset,
		    SPA_FEATURE_REDACTED_DATASETS));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
		spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0)
		goto early_unlock;

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	DTRACE_SET_STATE(db, "read issued");
	mutex_exit(&db->db_mtx);

	if (!DBUF_IS_CACHEABLE(db))
		aflags |= ARC_FLAG_UNCACHED;
	else if (dbuf_is_l2cacheable(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we have our
	 * own copy so that we can release the parent's rwlock. We have to
	 * do that so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, bpp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
early_unlock:
	DB_DNODE_EXIT(db);
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		dnode_t *dn = DB_DNODE(db);
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);
		uint8_t complevel = arc_get_complevel(db->db_buf);

		if (arc_is_encrypted(db->db_buf)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(db->db_buf, &byteorder, salt,
			    iv, mac);
			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
			    compress_type, complevel);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type,
			    complevel);
		} else {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		}
		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;

	mutex_enter(&db->db_mtx);
	if (flags & DB_RF_PARTIAL_FIRST)
		db->db_partial_read = B_TRUE;
	else if (!(flags & DB_RF_PARTIAL_MORE))
		db->db_partial_read = B_FALSE;
	if (db->db_state == DB_CACHED) {
		/*
		 * Ensure that this block's dnode has been decrypted if
		 * the caller has requested decrypted data.
		 */
		err = dbuf_read_verify_dnode_crypt(db, flags);

		/*
		 * If the arc buf is compressed or encrypted and the caller
		 * requested uncompressed data, we need to untransform it
		 * before returning. We also call arc_untransform() on any
		 * unauthenticated blocks, which will verify their MAC if
		 * the key is now available.
1799 */ 1800 if (err == 0 && db->db_buf != NULL && 1801 (flags & DB_RF_NO_DECRYPT) == 0 && 1802 (arc_is_encrypted(db->db_buf) || 1803 arc_is_unauthenticated(db->db_buf) || 1804 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { 1805 spa_t *spa = dn->dn_objset->os_spa; 1806 zbookmark_phys_t zb; 1807 1808 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1809 db->db.db_object, db->db_level, db->db_blkid); 1810 dbuf_fix_old_data(db, spa_syncing_txg(spa)); 1811 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); 1812 dbuf_set_data(db, db->db_buf); 1813 } 1814 mutex_exit(&db->db_mtx); 1815 if (err == 0 && prefetch) { 1816 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1817 B_FALSE, flags & DB_RF_HAVESTRUCT); 1818 } 1819 DB_DNODE_EXIT(db); 1820 DBUF_STAT_BUMP(hash_hits); 1821 } else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) { 1822 boolean_t need_wait = B_FALSE; 1823 1824 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 1825 1826 if (zio == NULL && (db->db_state == DB_NOFILL || 1827 (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) { 1828 spa_t *spa = dn->dn_objset->os_spa; 1829 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1830 need_wait = B_TRUE; 1831 } 1832 err = dbuf_read_impl(db, zio, flags, dblt, FTAG); 1833 /* 1834 * dbuf_read_impl has dropped db_mtx and our parent's rwlock 1835 * for us 1836 */ 1837 if (!err && prefetch) { 1838 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1839 db->db_state != DB_CACHED, 1840 flags & DB_RF_HAVESTRUCT); 1841 } 1842 1843 DB_DNODE_EXIT(db); 1844 DBUF_STAT_BUMP(hash_misses); 1845 1846 /* 1847 * If we created a zio_root we must execute it to avoid 1848 * leaking it, even if it isn't attached to any work due 1849 * to an error in dbuf_read_impl(). 1850 */ 1851 if (need_wait) { 1852 if (err == 0) 1853 err = zio_wait(zio); 1854 else 1855 VERIFY0(zio_wait(zio)); 1856 } 1857 } else { 1858 /* 1859 * Another reader came in while the dbuf was in flight 1860 * between UNCACHED and CACHED. Either a writer will finish 1861 * writing the buffer (sending the dbuf to CACHED) or the 1862 * first reader's request will reach the read_done callback 1863 * and send the dbuf to CACHED. Otherwise, a failure 1864 * occurred and the dbuf went to UNCACHED. 1865 */ 1866 mutex_exit(&db->db_mtx); 1867 if (prefetch) { 1868 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, 1869 B_TRUE, flags & DB_RF_HAVESTRUCT); 1870 } 1871 DB_DNODE_EXIT(db); 1872 DBUF_STAT_BUMP(hash_misses); 1873 1874 /* Skip the wait per the caller's request. 
*/ 1875 if ((flags & DB_RF_NEVERWAIT) == 0) { 1876 mutex_enter(&db->db_mtx); 1877 while (db->db_state == DB_READ || 1878 db->db_state == DB_FILL) { 1879 ASSERT(db->db_state == DB_READ || 1880 (flags & DB_RF_HAVESTRUCT) == 0); 1881 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1882 db, zio_t *, zio); 1883 cv_wait(&db->db_changed, &db->db_mtx); 1884 } 1885 if (db->db_state == DB_UNCACHED) 1886 err = SET_ERROR(EIO); 1887 mutex_exit(&db->db_mtx); 1888 } 1889 } 1890 1891 return (err); 1892 } 1893 1894 static void 1895 dbuf_noread(dmu_buf_impl_t *db) 1896 { 1897 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1898 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1899 mutex_enter(&db->db_mtx); 1900 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1901 cv_wait(&db->db_changed, &db->db_mtx); 1902 if (db->db_state == DB_UNCACHED) { 1903 ASSERT(db->db_buf == NULL); 1904 ASSERT(db->db.db_data == NULL); 1905 dbuf_set_data(db, dbuf_alloc_arcbuf(db)); 1906 db->db_state = DB_FILL; 1907 DTRACE_SET_STATE(db, "assigning filled buffer"); 1908 } else if (db->db_state == DB_NOFILL) { 1909 dbuf_clear_data(db); 1910 } else { 1911 ASSERT3U(db->db_state, ==, DB_CACHED); 1912 } 1913 mutex_exit(&db->db_mtx); 1914 } 1915 1916 void 1917 dbuf_unoverride(dbuf_dirty_record_t *dr) 1918 { 1919 dmu_buf_impl_t *db = dr->dr_dbuf; 1920 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1921 uint64_t txg = dr->dr_txg; 1922 boolean_t release; 1923 1924 ASSERT(MUTEX_HELD(&db->db_mtx)); 1925 /* 1926 * This assert is valid because dmu_sync() expects to be called by 1927 * a zilog's get_data while holding a range lock. This call only 1928 * comes from dbuf_dirty() callers who must also hold a range lock. 1929 */ 1930 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1931 ASSERT(db->db_level == 0); 1932 1933 if (db->db_blkid == DMU_BONUS_BLKID || 1934 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1935 return; 1936 1937 ASSERT(db->db_data_pending != dr); 1938 1939 /* free this block */ 1940 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 1941 zio_free(db->db_objset->os_spa, txg, bp); 1942 1943 release = !dr->dt.dl.dr_brtwrite; 1944 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1945 dr->dt.dl.dr_nopwrite = B_FALSE; 1946 dr->dt.dl.dr_brtwrite = B_FALSE; 1947 dr->dt.dl.dr_has_raw_params = B_FALSE; 1948 1949 /* 1950 * Release the already-written buffer, so we leave it in 1951 * a consistent dirty state. Note that all callers are 1952 * modifying the buffer, so they will immediately do 1953 * another (redundant) arc_release(). Therefore, leave 1954 * the buf thawed to save the effort of freezing & 1955 * immediately re-thawing it. 1956 */ 1957 if (release) 1958 arc_release(dr->dt.dl.dr_data, db); 1959 } 1960 1961 /* 1962 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 1963 * data blocks in the free range, so that any future readers will find 1964 * empty blocks.
1965 */ 1966 void 1967 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 1968 dmu_tx_t *tx) 1969 { 1970 dmu_buf_impl_t *db_search; 1971 dmu_buf_impl_t *db, *db_next; 1972 uint64_t txg = tx->tx_txg; 1973 avl_index_t where; 1974 dbuf_dirty_record_t *dr; 1975 1976 if (end_blkid > dn->dn_maxblkid && 1977 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 1978 end_blkid = dn->dn_maxblkid; 1979 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid, 1980 (u_longlong_t)end_blkid); 1981 1982 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); 1983 db_search->db_level = 0; 1984 db_search->db_blkid = start_blkid; 1985 db_search->db_state = DB_SEARCH; 1986 1987 mutex_enter(&dn->dn_dbufs_mtx); 1988 db = avl_find(&dn->dn_dbufs, db_search, &where); 1989 ASSERT3P(db, ==, NULL); 1990 1991 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 1992 1993 for (; db != NULL; db = db_next) { 1994 db_next = AVL_NEXT(&dn->dn_dbufs, db); 1995 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1996 1997 if (db->db_level != 0 || db->db_blkid > end_blkid) { 1998 break; 1999 } 2000 ASSERT3U(db->db_blkid, >=, start_blkid); 2001 2002 /* found a level 0 buffer in the range */ 2003 mutex_enter(&db->db_mtx); 2004 if (dbuf_undirty(db, tx)) { 2005 /* mutex has been dropped and dbuf destroyed */ 2006 continue; 2007 } 2008 2009 if (db->db_state == DB_UNCACHED || 2010 db->db_state == DB_NOFILL || 2011 db->db_state == DB_EVICTING) { 2012 ASSERT(db->db.db_data == NULL); 2013 mutex_exit(&db->db_mtx); 2014 continue; 2015 } 2016 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 2017 /* will be handled in dbuf_read_done or dbuf_rele */ 2018 db->db_freed_in_flight = TRUE; 2019 mutex_exit(&db->db_mtx); 2020 continue; 2021 } 2022 if (zfs_refcount_count(&db->db_holds) == 0) { 2023 ASSERT(db->db_buf); 2024 dbuf_destroy(db); 2025 continue; 2026 } 2027 /* The dbuf is referenced */ 2028 2029 dr = list_head(&db->db_dirty_records); 2030 if (dr != NULL) { 2031 if (dr->dr_txg == txg) { 2032 /* 2033 * This buffer is "in-use", re-adjust the file 2034 * size to reflect that this buffer may 2035 * contain new data when we sync. 2036 */ 2037 if (db->db_blkid != DMU_SPILL_BLKID && 2038 db->db_blkid > dn->dn_maxblkid) 2039 dn->dn_maxblkid = db->db_blkid; 2040 dbuf_unoverride(dr); 2041 } else { 2042 /* 2043 * This dbuf is not dirty in the open context. 2044 * Either uncache it (if it's not referenced in 2045 * the open context) or reset its contents to 2046 * empty.
2047 */ 2048 dbuf_fix_old_data(db, txg); 2049 } 2050 } 2051 /* clear the contents if it's cached */ 2052 if (db->db_state == DB_CACHED) { 2053 ASSERT(db->db.db_data != NULL); 2054 arc_release(db->db_buf, db); 2055 rw_enter(&db->db_rwlock, RW_WRITER); 2056 memset(db->db.db_data, 0, db->db.db_size); 2057 rw_exit(&db->db_rwlock); 2058 arc_buf_freeze(db->db_buf); 2059 } 2060 2061 mutex_exit(&db->db_mtx); 2062 } 2063 2064 mutex_exit(&dn->dn_dbufs_mtx); 2065 kmem_free(db_search, sizeof (dmu_buf_impl_t)); 2066 } 2067 2068 void 2069 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 2070 { 2071 arc_buf_t *buf, *old_buf; 2072 dbuf_dirty_record_t *dr; 2073 int osize = db->db.db_size; 2074 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2075 dnode_t *dn; 2076 2077 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2078 2079 DB_DNODE_ENTER(db); 2080 dn = DB_DNODE(db); 2081 2082 /* 2083 * XXX we should be doing a dbuf_read, checking the return 2084 * value and returning that up to our callers 2085 */ 2086 dmu_buf_will_dirty(&db->db, tx); 2087 2088 /* create the data buffer for the new block */ 2089 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 2090 2091 /* copy old block data to the new block */ 2092 old_buf = db->db_buf; 2093 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size)); 2094 /* zero the remainder */ 2095 if (size > osize) 2096 memset((uint8_t *)buf->b_data + osize, 0, size - osize); 2097 2098 mutex_enter(&db->db_mtx); 2099 dbuf_set_data(db, buf); 2100 arc_buf_destroy(old_buf, db); 2101 db->db.db_size = size; 2102 2103 dr = list_head(&db->db_dirty_records); 2104 /* dirty record added by dmu_buf_will_dirty() */ 2105 VERIFY(dr != NULL); 2106 if (db->db_level == 0) 2107 dr->dt.dl.dr_data = buf; 2108 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2109 ASSERT3U(dr->dr_accounted, ==, osize); 2110 dr->dr_accounted = size; 2111 mutex_exit(&db->db_mtx); 2112 2113 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 2114 DB_DNODE_EXIT(db); 2115 } 2116 2117 void 2118 dbuf_release_bp(dmu_buf_impl_t *db) 2119 { 2120 objset_t *os __maybe_unused = db->db_objset; 2121 2122 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 2123 ASSERT(arc_released(os->os_phys_buf) || 2124 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 2125 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 2126 2127 (void) arc_release(db->db_buf, db); 2128 } 2129 2130 /* 2131 * We already have a dirty record for this TXG, and we are being 2132 * dirtied again. 2133 */ 2134 static void 2135 dbuf_redirty(dbuf_dirty_record_t *dr) 2136 { 2137 dmu_buf_impl_t *db = dr->dr_dbuf; 2138 2139 ASSERT(MUTEX_HELD(&db->db_mtx)); 2140 2141 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 2142 /* 2143 * If this buffer has already been written out, 2144 * we now need to reset its state. 2145 */ 2146 dbuf_unoverride(dr); 2147 if (db->db.db_object != DMU_META_DNODE_OBJECT && 2148 db->db_state != DB_NOFILL) { 2149 /* Already released on initial dirty, so just thaw.
*/ 2150 ASSERT(arc_released(db->db_buf)); 2151 arc_buf_thaw(db->db_buf); 2152 } 2153 } 2154 } 2155 2156 dbuf_dirty_record_t * 2157 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) 2158 { 2159 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2160 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid); 2161 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE); 2162 ASSERT(dn->dn_maxblkid >= blkid); 2163 2164 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP); 2165 list_link_init(&dr->dr_dirty_node); 2166 list_link_init(&dr->dr_dbuf_node); 2167 dr->dr_dnode = dn; 2168 dr->dr_txg = tx->tx_txg; 2169 dr->dt.dll.dr_blkid = blkid; 2170 dr->dr_accounted = dn->dn_datablksz; 2171 2172 /* 2173 * There should not be any dbuf for the block that we're dirtying. 2174 * Otherwise the buffer contents could be inconsistent between the 2175 * dbuf and the lightweight dirty record. 2176 */ 2177 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid, 2178 NULL)); 2179 2180 mutex_enter(&dn->dn_mtx); 2181 int txgoff = tx->tx_txg & TXG_MASK; 2182 if (dn->dn_free_ranges[txgoff] != NULL) { 2183 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1); 2184 } 2185 2186 if (dn->dn_nlevels == 1) { 2187 ASSERT3U(blkid, <, dn->dn_nblkptr); 2188 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2189 mutex_exit(&dn->dn_mtx); 2190 rw_exit(&dn->dn_struct_rwlock); 2191 dnode_setdirty(dn, tx); 2192 } else { 2193 mutex_exit(&dn->dn_mtx); 2194 2195 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2196 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn, 2197 1, blkid >> epbs, FTAG); 2198 rw_exit(&dn->dn_struct_rwlock); 2199 if (parent_db == NULL) { 2200 kmem_free(dr, sizeof (*dr)); 2201 return (NULL); 2202 } 2203 int err = dbuf_read(parent_db, NULL, 2204 (DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2205 if (err != 0) { 2206 dbuf_rele(parent_db, FTAG); 2207 kmem_free(dr, sizeof (*dr)); 2208 return (NULL); 2209 } 2210 2211 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx); 2212 dbuf_rele(parent_db, FTAG); 2213 mutex_enter(&parent_dr->dt.di.dr_mtx); 2214 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg); 2215 list_insert_tail(&parent_dr->dt.di.dr_children, dr); 2216 mutex_exit(&parent_dr->dt.di.dr_mtx); 2217 dr->dr_parent = parent_dr; 2218 } 2219 2220 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx); 2221 2222 return (dr); 2223 } 2224 2225 dbuf_dirty_record_t * 2226 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2227 { 2228 dnode_t *dn; 2229 objset_t *os; 2230 dbuf_dirty_record_t *dr, *dr_next, *dr_head; 2231 int txgoff = tx->tx_txg & TXG_MASK; 2232 boolean_t drop_struct_rwlock = B_FALSE; 2233 2234 ASSERT(tx->tx_txg != 0); 2235 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2236 DMU_TX_DIRTY_BUF(tx, db); 2237 2238 DB_DNODE_ENTER(db); 2239 dn = DB_DNODE(db); 2240 /* 2241 * Shouldn't dirty a regular buffer in syncing context. Private 2242 * objects may be dirtied in syncing context, but only if they 2243 * were already pre-dirtied in open context. 
2244 */ 2245 #ifdef ZFS_DEBUG 2246 if (dn->dn_objset->os_dsl_dataset != NULL) { 2247 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 2248 RW_READER, FTAG); 2249 } 2250 ASSERT(!dmu_tx_is_syncing(tx) || 2251 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 2252 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2253 dn->dn_objset->os_dsl_dataset == NULL); 2254 if (dn->dn_objset->os_dsl_dataset != NULL) 2255 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 2256 #endif 2257 /* 2258 * We make this assert for private objects as well, but after we 2259 * check if we're already dirty. They are allowed to re-dirty 2260 * in syncing context. 2261 */ 2262 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2263 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2264 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2265 2266 mutex_enter(&db->db_mtx); 2267 /* 2268 * XXX make this true for indirects too? The problem is that 2269 * transactions created with dmu_tx_create_assigned() from 2270 * syncing context don't bother holding ahead. 2271 */ 2272 ASSERT(db->db_level != 0 || 2273 db->db_state == DB_CACHED || db->db_state == DB_FILL || 2274 db->db_state == DB_NOFILL); 2275 2276 mutex_enter(&dn->dn_mtx); 2277 dnode_set_dirtyctx(dn, tx, db); 2278 if (tx->tx_txg > dn->dn_dirty_txg) 2279 dn->dn_dirty_txg = tx->tx_txg; 2280 mutex_exit(&dn->dn_mtx); 2281 2282 if (db->db_blkid == DMU_SPILL_BLKID) 2283 dn->dn_have_spill = B_TRUE; 2284 2285 /* 2286 * If this buffer is already dirty, we're done. 2287 */ 2288 dr_head = list_head(&db->db_dirty_records); 2289 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg || 2290 db->db.db_object == DMU_META_DNODE_OBJECT); 2291 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg); 2292 if (dr_next && dr_next->dr_txg == tx->tx_txg) { 2293 DB_DNODE_EXIT(db); 2294 2295 dbuf_redirty(dr_next); 2296 mutex_exit(&db->db_mtx); 2297 return (dr_next); 2298 } 2299 2300 /* 2301 * Only valid if not already dirty. 2302 */ 2303 ASSERT(dn->dn_object == 0 || 2304 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2305 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2306 2307 ASSERT3U(dn->dn_nlevels, >, db->db_level); 2308 2309 /* 2310 * We should only be dirtying in syncing context if it's the 2311 * mos or we're initializing the os or it's a special object. 2312 * However, we are allowed to dirty in syncing context provided 2313 * we already dirtied it in open context. Hence we must make 2314 * this assertion only if we're not already dirty. 2315 */ 2316 os = dn->dn_objset; 2317 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 2318 #ifdef ZFS_DEBUG 2319 if (dn->dn_objset->os_dsl_dataset != NULL) 2320 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 2321 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2322 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 2323 if (dn->dn_objset->os_dsl_dataset != NULL) 2324 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 2325 #endif 2326 ASSERT(db->db.db_size != 0); 2327 2328 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2329 2330 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) { 2331 dmu_objset_willuse_space(os, db->db.db_size, tx); 2332 } 2333 2334 /* 2335 * If this buffer is dirty in an old transaction group we need 2336 * to make a copy of it so that the changes we make in this 2337 * transaction group won't leak out when we sync the older txg. 
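 *
 * Illustrative picture (a sketch with made-up txg numbers): with txg 100
 * syncing and txg 101 open, a level-0 dbuf dirtied in both txgs ends up
 * with one dirty record per txg, newest first, each with its own data:
 *
 *	db->db_dirty_records: dr101 -> dr100
 *	dr101->dt.dl.dr_data == db->db_buf    (live buffer)
 *	dr100->dt.dl.dr_data == private copy  (made by dbuf_fix_old_data())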
2338 */ 2339 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 2340 list_link_init(&dr->dr_dirty_node); 2341 list_link_init(&dr->dr_dbuf_node); 2342 dr->dr_dnode = dn; 2343 if (db->db_level == 0) { 2344 void *data_old = db->db_buf; 2345 2346 if (db->db_state != DB_NOFILL) { 2347 if (db->db_blkid == DMU_BONUS_BLKID) { 2348 dbuf_fix_old_data(db, tx->tx_txg); 2349 data_old = db->db.db_data; 2350 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 2351 /* 2352 * Release the data buffer from the cache so 2353 * that we can modify it without impacting 2354 * possible other users of this cached data 2355 * block. Note that indirect blocks and 2356 * private objects are not released until the 2357 * syncing state (since they are only modified 2358 * then). 2359 */ 2360 arc_release(db->db_buf, db); 2361 dbuf_fix_old_data(db, tx->tx_txg); 2362 data_old = db->db_buf; 2363 } 2364 ASSERT(data_old != NULL); 2365 } 2366 dr->dt.dl.dr_data = data_old; 2367 } else { 2368 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); 2369 list_create(&dr->dt.di.dr_children, 2370 sizeof (dbuf_dirty_record_t), 2371 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 2372 } 2373 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) { 2374 dr->dr_accounted = db->db.db_size; 2375 } 2376 dr->dr_dbuf = db; 2377 dr->dr_txg = tx->tx_txg; 2378 list_insert_before(&db->db_dirty_records, dr_next, dr); 2379 2380 /* 2381 * We could have been freed_in_flight between the dbuf_noread 2382 * and dbuf_dirty. We win, as though the dbuf_noread() had 2383 * happened after the free. 2384 */ 2385 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2386 db->db_blkid != DMU_SPILL_BLKID) { 2387 mutex_enter(&dn->dn_mtx); 2388 if (dn->dn_free_ranges[txgoff] != NULL) { 2389 range_tree_clear(dn->dn_free_ranges[txgoff], 2390 db->db_blkid, 1); 2391 } 2392 mutex_exit(&dn->dn_mtx); 2393 db->db_freed_in_flight = FALSE; 2394 } 2395 2396 /* 2397 * This buffer is now part of this txg 2398 */ 2399 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 2400 db->db_dirtycnt += 1; 2401 ASSERT3U(db->db_dirtycnt, <=, 3); 2402 2403 mutex_exit(&db->db_mtx); 2404 2405 if (db->db_blkid == DMU_BONUS_BLKID || 2406 db->db_blkid == DMU_SPILL_BLKID) { 2407 mutex_enter(&dn->dn_mtx); 2408 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2409 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2410 mutex_exit(&dn->dn_mtx); 2411 dnode_setdirty(dn, tx); 2412 DB_DNODE_EXIT(db); 2413 return (dr); 2414 } 2415 2416 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 2417 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2418 drop_struct_rwlock = B_TRUE; 2419 } 2420 2421 /* 2422 * If we are overwriting a dedup BP, then unless it is snapshotted, 2423 * when we get to syncing context we will need to decrement its 2424 * refcount in the DDT. Prefetch the relevant DDT block so that 2425 * syncing context won't have to wait for the i/o. 2426 */ 2427 if (db->db_blkptr != NULL) { 2428 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 2429 ddt_prefetch(os->os_spa, db->db_blkptr); 2430 dmu_buf_unlock_parent(db, dblt, FTAG); 2431 } 2432 2433 /* 2434 * We need to hold the dn_struct_rwlock to make this assertion, 2435 * because it protects dn_phys / dn_next_nlevels from changing. 
2436 */ 2437 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 2438 dn->dn_phys->dn_nlevels > db->db_level || 2439 dn->dn_next_nlevels[txgoff] > db->db_level || 2440 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 2441 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 2442 2443 2444 if (db->db_level == 0) { 2445 ASSERT(!db->db_objset->os_raw_receive || 2446 dn->dn_maxblkid >= db->db_blkid); 2447 dnode_new_blkid(dn, db->db_blkid, tx, 2448 drop_struct_rwlock, B_FALSE); 2449 ASSERT(dn->dn_maxblkid >= db->db_blkid); 2450 } 2451 2452 if (db->db_level+1 < dn->dn_nlevels) { 2453 dmu_buf_impl_t *parent = db->db_parent; 2454 dbuf_dirty_record_t *di; 2455 int parent_held = FALSE; 2456 2457 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 2458 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2459 parent = dbuf_hold_level(dn, db->db_level + 1, 2460 db->db_blkid >> epbs, FTAG); 2461 ASSERT(parent != NULL); 2462 parent_held = TRUE; 2463 } 2464 if (drop_struct_rwlock) 2465 rw_exit(&dn->dn_struct_rwlock); 2466 ASSERT3U(db->db_level + 1, ==, parent->db_level); 2467 di = dbuf_dirty(parent, tx); 2468 if (parent_held) 2469 dbuf_rele(parent, FTAG); 2470 2471 mutex_enter(&db->db_mtx); 2472 /* 2473 * Since we've dropped the mutex, it's possible that 2474 * dbuf_undirty() might have changed this out from under us. 2475 */ 2476 if (list_head(&db->db_dirty_records) == dr || 2477 dn->dn_object == DMU_META_DNODE_OBJECT) { 2478 mutex_enter(&di->dt.di.dr_mtx); 2479 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 2480 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2481 list_insert_tail(&di->dt.di.dr_children, dr); 2482 mutex_exit(&di->dt.di.dr_mtx); 2483 dr->dr_parent = di; 2484 } 2485 mutex_exit(&db->db_mtx); 2486 } else { 2487 ASSERT(db->db_level + 1 == dn->dn_nlevels); 2488 ASSERT(db->db_blkid < dn->dn_nblkptr); 2489 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 2490 mutex_enter(&dn->dn_mtx); 2491 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2492 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2493 mutex_exit(&dn->dn_mtx); 2494 if (drop_struct_rwlock) 2495 rw_exit(&dn->dn_struct_rwlock); 2496 } 2497 2498 dnode_setdirty(dn, tx); 2499 DB_DNODE_EXIT(db); 2500 return (dr); 2501 } 2502 2503 static void 2504 dbuf_undirty_bonus(dbuf_dirty_record_t *dr) 2505 { 2506 dmu_buf_impl_t *db = dr->dr_dbuf; 2507 2508 if (dr->dt.dl.dr_data != db->db.db_data) { 2509 struct dnode *dn = dr->dr_dnode; 2510 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 2511 2512 kmem_free(dr->dt.dl.dr_data, max_bonuslen); 2513 arc_space_return(max_bonuslen, ARC_SPACE_BONUS); 2514 } 2515 db->db_data_pending = NULL; 2516 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 2517 list_remove(&db->db_dirty_records, dr); 2518 if (dr->dr_dbuf->db_level != 0) { 2519 mutex_destroy(&dr->dt.di.dr_mtx); 2520 list_destroy(&dr->dt.di.dr_children); 2521 } 2522 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2523 ASSERT3U(db->db_dirtycnt, >, 0); 2524 db->db_dirtycnt -= 1; 2525 } 2526 2527 /* 2528 * Undirty a buffer in the transaction group referenced by the given 2529 * transaction. Return whether this evicted the dbuf. 2530 */ 2531 boolean_t 2532 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2533 { 2534 uint64_t txg = tx->tx_txg; 2535 boolean_t brtwrite; 2536 2537 ASSERT(txg != 0); 2538 2539 /* 2540 * Due to our use of dn_nlevels below, this can only be called 2541 * in open context, unless we are operating on the MOS. 
2542 * From syncing context, dn_nlevels may be different from the 2543 * dn_nlevels used when dbuf was dirtied. 2544 */ 2545 ASSERT(db->db_objset == 2546 dmu_objset_pool(db->db_objset)->dp_meta_objset || 2547 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 2548 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2549 ASSERT0(db->db_level); 2550 ASSERT(MUTEX_HELD(&db->db_mtx)); 2551 2552 /* 2553 * If this buffer is not dirty, we're done. 2554 */ 2555 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg); 2556 if (dr == NULL) 2557 return (B_FALSE); 2558 ASSERT(dr->dr_dbuf == db); 2559 2560 brtwrite = dr->dt.dl.dr_brtwrite; 2561 if (brtwrite) { 2562 /* 2563 * We are freeing a block that we cloned in the same 2564 * transaction group. 2565 */ 2566 brt_pending_remove(dmu_objset_spa(db->db_objset), 2567 &dr->dt.dl.dr_overridden_by, tx); 2568 } 2569 2570 dnode_t *dn = dr->dr_dnode; 2571 2572 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2573 2574 ASSERT(db->db.db_size != 0); 2575 2576 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 2577 dr->dr_accounted, txg); 2578 2579 list_remove(&db->db_dirty_records, dr); 2580 2581 /* 2582 * Note that there are three places in dbuf_dirty() 2583 * where this dirty record may be put on a list. 2584 * Make sure to do a list_remove corresponding to 2585 * every one of those list_insert calls. 2586 */ 2587 if (dr->dr_parent) { 2588 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 2589 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 2590 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 2591 } else if (db->db_blkid == DMU_SPILL_BLKID || 2592 db->db_level + 1 == dn->dn_nlevels) { 2593 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 2594 mutex_enter(&dn->dn_mtx); 2595 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 2596 mutex_exit(&dn->dn_mtx); 2597 } 2598 2599 if (db->db_state != DB_NOFILL && !brtwrite) { 2600 dbuf_unoverride(dr); 2601 2602 ASSERT(db->db_buf != NULL); 2603 ASSERT(dr->dt.dl.dr_data != NULL); 2604 if (dr->dt.dl.dr_data != db->db_buf) 2605 arc_buf_destroy(dr->dt.dl.dr_data, db); 2606 } 2607 2608 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2609 2610 ASSERT(db->db_dirtycnt > 0); 2611 db->db_dirtycnt -= 1; 2612 2613 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2614 ASSERT(db->db_state == DB_NOFILL || brtwrite || 2615 arc_released(db->db_buf)); 2616 dbuf_destroy(db); 2617 return (B_TRUE); 2618 } 2619 2620 return (B_FALSE); 2621 } 2622 2623 static void 2624 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) 2625 { 2626 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2627 boolean_t undirty = B_FALSE; 2628 2629 ASSERT(tx->tx_txg != 0); 2630 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2631 2632 /* 2633 * Quick check for dirtiness. For already dirty blocks, this 2634 * reduces runtime of this function by >90%, and overall performance 2635 * by 50% for some workloads (e.g. file deletion with indirect blocks 2636 * cached). 2637 */ 2638 mutex_enter(&db->db_mtx); 2639 2640 if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) { 2641 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2642 /* 2643 * It's possible that it is already dirty but not cached, 2644 * because there are some calls to dbuf_dirty() that don't 2645 * go through dmu_buf_will_dirty(). 
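 *
 * For context, the canonical open-context write pattern that funnels
 * through here looks roughly like this (an illustrative sketch; error
 * handling elided, object/offset hypothetical):
 *
 *	dmu_buf_t *dbp;
 *	VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &dbp,
 *	    DMU_READ_NO_PREFETCH));
 *	dmu_buf_will_dirty(dbp, tx);
 *	... modify dbp->db_data ...
 *	dmu_buf_rele(dbp, FTAG);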
2646 */ 2647 if (dr != NULL) { 2648 if (dr->dt.dl.dr_brtwrite) { 2649 /* 2650 * Block cloning: If we are dirtying a cloned 2651 * block, we cannot simply redirty it, because 2652 * this dr has no data associated with it. 2653 * We will go through a full undirtying below, 2654 * before dirtying it again. 2655 */ 2656 undirty = B_TRUE; 2657 } else { 2658 /* This dbuf is already dirty and cached. */ 2659 dbuf_redirty(dr); 2660 mutex_exit(&db->db_mtx); 2661 return; 2662 } 2663 } 2664 } 2665 mutex_exit(&db->db_mtx); 2666 2667 DB_DNODE_ENTER(db); 2668 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 2669 flags |= DB_RF_HAVESTRUCT; 2670 DB_DNODE_EXIT(db); 2671 2672 /* 2673 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we 2674 * want to make sure dbuf_read() will read the pending cloned block and 2675 * not the uderlying block that is being replaced. dbuf_undirty() will 2676 * do dbuf_unoverride(), so we will end up with cloned block content, 2677 * without overridden BP. 2678 */ 2679 (void) dbuf_read(db, NULL, flags); 2680 if (undirty) { 2681 mutex_enter(&db->db_mtx); 2682 VERIFY(!dbuf_undirty(db, tx)); 2683 mutex_exit(&db->db_mtx); 2684 } 2685 (void) dbuf_dirty(db, tx); 2686 } 2687 2688 void 2689 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2690 { 2691 dmu_buf_will_dirty_impl(db_fake, 2692 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); 2693 } 2694 2695 boolean_t 2696 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2697 { 2698 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2699 dbuf_dirty_record_t *dr; 2700 2701 mutex_enter(&db->db_mtx); 2702 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2703 mutex_exit(&db->db_mtx); 2704 return (dr != NULL); 2705 } 2706 2707 void 2708 dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx) 2709 { 2710 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2711 2712 /* 2713 * Block cloning: We are going to clone into this block, so undirty 2714 * modifications done to this block so far in this txg. This includes 2715 * writes and clones into this block. 2716 */ 2717 mutex_enter(&db->db_mtx); 2718 DBUF_VERIFY(db); 2719 VERIFY(!dbuf_undirty(db, tx)); 2720 ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg)); 2721 if (db->db_buf != NULL) { 2722 arc_buf_destroy(db->db_buf, db); 2723 db->db_buf = NULL; 2724 dbuf_clear_data(db); 2725 } 2726 2727 db->db_state = DB_NOFILL; 2728 DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone"); 2729 2730 DBUF_VERIFY(db); 2731 mutex_exit(&db->db_mtx); 2732 2733 dbuf_noread(db); 2734 (void) dbuf_dirty(db, tx); 2735 } 2736 2737 void 2738 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2739 { 2740 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2741 2742 mutex_enter(&db->db_mtx); 2743 db->db_state = DB_NOFILL; 2744 DTRACE_SET_STATE(db, "allocating NOFILL buffer"); 2745 mutex_exit(&db->db_mtx); 2746 2747 dbuf_noread(db); 2748 (void) dbuf_dirty(db, tx); 2749 } 2750 2751 void 2752 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2753 { 2754 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2755 2756 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2757 ASSERT(tx->tx_txg != 0); 2758 ASSERT(db->db_level == 0); 2759 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2760 2761 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2762 dmu_tx_private_ok(tx)); 2763 2764 mutex_enter(&db->db_mtx); 2765 if (db->db_state == DB_NOFILL) { 2766 /* 2767 * Block cloning: We will be completely overwriting a block 2768 * cloned in this transaction group, so let's undirty the 2769 * pending clone and mark the block as uncached. 
This will be 2770 * as if the clone was never done. 2771 */ 2772 VERIFY(!dbuf_undirty(db, tx)); 2773 db->db_state = DB_UNCACHED; 2774 } 2775 mutex_exit(&db->db_mtx); 2776 2777 dbuf_noread(db); 2778 (void) dbuf_dirty(db, tx); 2779 } 2780 2781 /* 2782 * This function is effectively the same as dmu_buf_will_dirty(), but 2783 * indicates the caller expects raw encrypted data in the db, and provides 2784 * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2785 * blkptr_t when this dbuf is written. This is only used for blocks of 2786 * dnodes, during raw receive. 2787 */ 2788 void 2789 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2790 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2791 { 2792 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2793 dbuf_dirty_record_t *dr; 2794 2795 /* 2796 * dr_has_raw_params is only processed for blocks of dnodes 2797 * (see dbuf_sync_dnode_leaf_crypt()). 2798 */ 2799 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2800 ASSERT3U(db->db_level, ==, 0); 2801 ASSERT(db->db_objset->os_raw_receive); 2802 2803 dmu_buf_will_dirty_impl(db_fake, 2804 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); 2805 2806 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2807 2808 ASSERT3P(dr, !=, NULL); 2809 2810 dr->dt.dl.dr_has_raw_params = B_TRUE; 2811 dr->dt.dl.dr_byteorder = byteorder; 2812 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN); 2813 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN); 2814 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN); 2815 } 2816 2817 static void 2818 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) 2819 { 2820 struct dirty_leaf *dl; 2821 dbuf_dirty_record_t *dr; 2822 2823 dr = list_head(&db->db_dirty_records); 2824 ASSERT3P(dr, !=, NULL); 2825 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2826 dl = &dr->dt.dl; 2827 dl->dr_overridden_by = *bp; 2828 dl->dr_override_state = DR_OVERRIDDEN; 2829 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2830 } 2831 2832 void 2833 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx) 2834 { 2835 (void) tx; 2836 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2837 dbuf_states_t old_state; 2838 mutex_enter(&db->db_mtx); 2839 DBUF_VERIFY(db); 2840 2841 old_state = db->db_state; 2842 db->db_state = DB_CACHED; 2843 if (old_state == DB_FILL) { 2844 if (db->db_level == 0 && db->db_freed_in_flight) { 2845 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2846 /* we were freed while filling */ 2847 /* XXX dbuf_undirty? 
*/ 2848 memset(db->db.db_data, 0, db->db.db_size); 2849 db->db_freed_in_flight = FALSE; 2850 DTRACE_SET_STATE(db, 2851 "fill done handling freed in flight"); 2852 } else { 2853 DTRACE_SET_STATE(db, "fill done"); 2854 } 2855 cv_broadcast(&db->db_changed); 2856 } 2857 mutex_exit(&db->db_mtx); 2858 } 2859 2860 void 2861 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 2862 bp_embedded_type_t etype, enum zio_compress comp, 2863 int uncompressed_size, int compressed_size, int byteorder, 2864 dmu_tx_t *tx) 2865 { 2866 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2867 struct dirty_leaf *dl; 2868 dmu_object_type_t type; 2869 dbuf_dirty_record_t *dr; 2870 2871 if (etype == BP_EMBEDDED_TYPE_DATA) { 2872 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2873 SPA_FEATURE_EMBEDDED_DATA)); 2874 } 2875 2876 DB_DNODE_ENTER(db); 2877 type = DB_DNODE(db)->dn_type; 2878 DB_DNODE_EXIT(db); 2879 2880 ASSERT0(db->db_level); 2881 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2882 2883 dmu_buf_will_not_fill(dbuf, tx); 2884 2885 dr = list_head(&db->db_dirty_records); 2886 ASSERT3P(dr, !=, NULL); 2887 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2888 dl = &dr->dt.dl; 2889 encode_embedded_bp_compressed(&dl->dr_overridden_by, 2890 data, comp, uncompressed_size, compressed_size); 2891 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 2892 BP_SET_TYPE(&dl->dr_overridden_by, type); 2893 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 2894 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 2895 2896 dl->dr_override_state = DR_OVERRIDDEN; 2897 dl->dr_overridden_by.blk_birth = dr->dr_txg; 2898 } 2899 2900 void 2901 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx) 2902 { 2903 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2904 dmu_object_type_t type; 2905 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset, 2906 SPA_FEATURE_REDACTED_DATASETS)); 2907 2908 DB_DNODE_ENTER(db); 2909 type = DB_DNODE(db)->dn_type; 2910 DB_DNODE_EXIT(db); 2911 2912 ASSERT0(db->db_level); 2913 dmu_buf_will_not_fill(dbuf, tx); 2914 2915 blkptr_t bp = { { { {0} } } }; 2916 BP_SET_TYPE(&bp, type); 2917 BP_SET_LEVEL(&bp, 0); 2918 BP_SET_BIRTH(&bp, tx->tx_txg, 0); 2919 BP_SET_REDACTED(&bp); 2920 BPE_SET_LSIZE(&bp, dbuf->db_size); 2921 2922 dbuf_override_impl(db, &bp, tx); 2923 } 2924 2925 /* 2926 * Directly assign a provided arc buf to a given dbuf if it's not referenced 2927 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 2928 */ 2929 void 2930 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 2931 { 2932 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2933 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2934 ASSERT(db->db_level == 0); 2935 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 2936 ASSERT(buf != NULL); 2937 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 2938 ASSERT(tx->tx_txg != 0); 2939 2940 arc_return_buf(buf, db); 2941 ASSERT(arc_released(buf)); 2942 2943 mutex_enter(&db->db_mtx); 2944 2945 while (db->db_state == DB_READ || db->db_state == DB_FILL) 2946 cv_wait(&db->db_changed, &db->db_mtx); 2947 2948 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 2949 2950 if (db->db_state == DB_CACHED && 2951 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2952 /* 2953 * In practice, we will never have a case where we have an 2954 * encrypted arc buffer while additional holds exist on the 2955 * dbuf. We don't handle this here so we simply assert that 2956 * fact instead. 
2957 */ 2958 ASSERT(!arc_is_encrypted(buf)); 2959 mutex_exit(&db->db_mtx); 2960 (void) dbuf_dirty(db, tx); 2961 memcpy(db->db.db_data, buf->b_data, db->db.db_size); 2962 arc_buf_destroy(buf, db); 2963 return; 2964 } 2965 2966 if (db->db_state == DB_CACHED) { 2967 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); 2968 2969 ASSERT(db->db_buf != NULL); 2970 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 2971 ASSERT(dr->dt.dl.dr_data == db->db_buf); 2972 2973 if (!arc_released(db->db_buf)) { 2974 ASSERT(dr->dt.dl.dr_override_state == 2975 DR_OVERRIDDEN); 2976 arc_release(db->db_buf, db); 2977 } 2978 dr->dt.dl.dr_data = buf; 2979 arc_buf_destroy(db->db_buf, db); 2980 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2981 arc_release(db->db_buf, db); 2982 arc_buf_destroy(db->db_buf, db); 2983 } 2984 db->db_buf = NULL; 2985 } 2986 ASSERT(db->db_buf == NULL); 2987 dbuf_set_data(db, buf); 2988 db->db_state = DB_FILL; 2989 DTRACE_SET_STATE(db, "filling assigned arcbuf"); 2990 mutex_exit(&db->db_mtx); 2991 (void) dbuf_dirty(db, tx); 2992 dmu_buf_fill_done(&db->db, tx); 2993 } 2994 2995 void 2996 dbuf_destroy(dmu_buf_impl_t *db) 2997 { 2998 dnode_t *dn; 2999 dmu_buf_impl_t *parent = db->db_parent; 3000 dmu_buf_impl_t *dndb; 3001 3002 ASSERT(MUTEX_HELD(&db->db_mtx)); 3003 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3004 3005 if (db->db_buf != NULL) { 3006 arc_buf_destroy(db->db_buf, db); 3007 db->db_buf = NULL; 3008 } 3009 3010 if (db->db_blkid == DMU_BONUS_BLKID) { 3011 int slots = DB_DNODE(db)->dn_num_slots; 3012 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 3013 if (db->db.db_data != NULL) { 3014 kmem_free(db->db.db_data, bonuslen); 3015 arc_space_return(bonuslen, ARC_SPACE_BONUS); 3016 db->db_state = DB_UNCACHED; 3017 DTRACE_SET_STATE(db, "buffer cleared"); 3018 } 3019 } 3020 3021 dbuf_clear_data(db); 3022 3023 if (multilist_link_active(&db->db_cache_link)) { 3024 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 3025 db->db_caching_status == DB_DBUF_METADATA_CACHE); 3026 3027 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 3028 3029 ASSERT0(dmu_buf_user_size(&db->db)); 3030 (void) zfs_refcount_remove_many( 3031 &dbuf_caches[db->db_caching_status].size, 3032 db->db.db_size, db); 3033 3034 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 3035 DBUF_STAT_BUMPDOWN(metadata_cache_count); 3036 } else { 3037 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 3038 DBUF_STAT_BUMPDOWN(cache_count); 3039 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 3040 db->db.db_size); 3041 } 3042 db->db_caching_status = DB_NO_CACHE; 3043 } 3044 3045 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 3046 ASSERT(db->db_data_pending == NULL); 3047 ASSERT(list_is_empty(&db->db_dirty_records)); 3048 3049 db->db_state = DB_EVICTING; 3050 DTRACE_SET_STATE(db, "buffer eviction started"); 3051 db->db_blkptr = NULL; 3052 3053 /* 3054 * Now that db_state is DB_EVICTING, nobody else can find this via 3055 * the hash table. We can now drop db_mtx, which allows us to 3056 * acquire the dn_dbufs_mtx. 
3057 */ 3058 mutex_exit(&db->db_mtx); 3059 3060 DB_DNODE_ENTER(db); 3061 dn = DB_DNODE(db); 3062 dndb = dn->dn_dbuf; 3063 if (db->db_blkid != DMU_BONUS_BLKID) { 3064 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 3065 if (needlock) 3066 mutex_enter_nested(&dn->dn_dbufs_mtx, 3067 NESTED_SINGLE); 3068 avl_remove(&dn->dn_dbufs, db); 3069 membar_producer(); 3070 DB_DNODE_EXIT(db); 3071 if (needlock) 3072 mutex_exit(&dn->dn_dbufs_mtx); 3073 /* 3074 * Decrementing the dbuf count means that the hold corresponding 3075 * to the removed dbuf is no longer discounted in dnode_move(), 3076 * so the dnode cannot be moved until after we release the hold. 3077 * The membar_producer() ensures visibility of the decremented 3078 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 3079 * release any lock. 3080 */ 3081 mutex_enter(&dn->dn_mtx); 3082 dnode_rele_and_unlock(dn, db, B_TRUE); 3083 db->db_dnode_handle = NULL; 3084 3085 dbuf_hash_remove(db); 3086 } else { 3087 DB_DNODE_EXIT(db); 3088 } 3089 3090 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3091 3092 db->db_parent = NULL; 3093 3094 ASSERT(db->db_buf == NULL); 3095 ASSERT(db->db.db_data == NULL); 3096 ASSERT(db->db_hash_next == NULL); 3097 ASSERT(db->db_blkptr == NULL); 3098 ASSERT(db->db_data_pending == NULL); 3099 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 3100 ASSERT(!multilist_link_active(&db->db_cache_link)); 3101 3102 /* 3103 * If this dbuf is referenced from an indirect dbuf, 3104 * decrement the ref count on the indirect dbuf. 3105 */ 3106 if (parent && parent != dndb) { 3107 mutex_enter(&parent->db_mtx); 3108 dbuf_rele_and_unlock(parent, db, B_TRUE); 3109 } 3110 3111 kmem_cache_free(dbuf_kmem_cache, db); 3112 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3113 } 3114 3115 /* 3116 * Note: While bpp will always be updated if the function returns success, 3117 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 3118 * this happens when the dnode is the meta-dnode, or {user|group|project}used 3119 * object. 3120 */ 3121 __attribute__((always_inline)) 3122 static inline int 3123 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 3124 dmu_buf_impl_t **parentp, blkptr_t **bpp) 3125 { 3126 *parentp = NULL; 3127 *bpp = NULL; 3128 3129 ASSERT(blkid != DMU_BONUS_BLKID); 3130 3131 if (blkid == DMU_SPILL_BLKID) { 3132 mutex_enter(&dn->dn_mtx); 3133 if (dn->dn_have_spill && 3134 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 3135 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 3136 else 3137 *bpp = NULL; 3138 dbuf_add_ref(dn->dn_dbuf, NULL); 3139 *parentp = dn->dn_dbuf; 3140 mutex_exit(&dn->dn_mtx); 3141 return (0); 3142 } 3143 3144 int nlevels = 3145 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 3146 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 3147 3148 ASSERT3U(level * epbs, <, 64); 3149 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3150 /* 3151 * This assertion shouldn't trip as long as the max indirect block size 3152 * is less than 1M. The reason for this is that up to that point, 3153 * the number of levels required to address an entire object with blocks 3154 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 3155 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 3156 * (i.e. we can address the entire object), objects will all use at most 3157 * N-1 levels and the assertion won't overflow. However, once epbs is 3158 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. 
Then, 4 levels will not be 3159 * enough to address an entire object, so objects will have 5 levels, 3160 * but then this assertion will overflow. 3161 * 3162 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 3163 * need to redo this logic to handle overflows. 3164 */ 3165 ASSERT(level >= nlevels || 3166 ((nlevels - level - 1) * epbs) + 3167 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 3168 if (level >= nlevels || 3169 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 3170 ((nlevels - level - 1) * epbs)) || 3171 (fail_sparse && 3172 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 3173 /* the buffer has no parent yet */ 3174 return (SET_ERROR(ENOENT)); 3175 } else if (level < nlevels-1) { 3176 /* this block is referenced from an indirect block */ 3177 int err; 3178 3179 err = dbuf_hold_impl(dn, level + 1, 3180 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 3181 3182 if (err) 3183 return (err); 3184 err = dbuf_read(*parentp, NULL, 3185 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 3186 if (err) { 3187 dbuf_rele(*parentp, NULL); 3188 *parentp = NULL; 3189 return (err); 3190 } 3191 rw_enter(&(*parentp)->db_rwlock, RW_READER); 3192 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 3193 (blkid & ((1ULL << epbs) - 1)); 3194 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 3195 ASSERT(BP_IS_HOLE(*bpp)); 3196 rw_exit(&(*parentp)->db_rwlock); 3197 return (0); 3198 } else { 3199 /* the block is referenced from the dnode */ 3200 ASSERT3U(level, ==, nlevels-1); 3201 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 3202 blkid < dn->dn_phys->dn_nblkptr); 3203 if (dn->dn_dbuf) { 3204 dbuf_add_ref(dn->dn_dbuf, NULL); 3205 *parentp = dn->dn_dbuf; 3206 } 3207 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 3208 return (0); 3209 } 3210 } 3211 3212 static dmu_buf_impl_t * 3213 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 3214 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash) 3215 { 3216 objset_t *os = dn->dn_objset; 3217 dmu_buf_impl_t *db, *odb; 3218 3219 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3220 ASSERT(dn->dn_type != DMU_OT_NONE); 3221 3222 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 3223 3224 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t), 3225 offsetof(dbuf_dirty_record_t, dr_dbuf_node)); 3226 3227 db->db_objset = os; 3228 db->db.db_object = dn->dn_object; 3229 db->db_level = level; 3230 db->db_blkid = blkid; 3231 db->db_dirtycnt = 0; 3232 db->db_dnode_handle = dn->dn_handle; 3233 db->db_parent = parent; 3234 db->db_blkptr = blkptr; 3235 db->db_hash = hash; 3236 3237 db->db_user = NULL; 3238 db->db_user_immediate_evict = FALSE; 3239 db->db_freed_in_flight = FALSE; 3240 db->db_pending_evict = FALSE; 3241 3242 if (blkid == DMU_BONUS_BLKID) { 3243 ASSERT3P(parent, ==, dn->dn_dbuf); 3244 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 3245 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 3246 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 3247 db->db.db_offset = DMU_BONUS_BLKID; 3248 db->db_state = DB_UNCACHED; 3249 DTRACE_SET_STATE(db, "bonus buffer created"); 3250 db->db_caching_status = DB_NO_CACHE; 3251 /* the bonus dbuf is not placed in the hash table */ 3252 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3253 return (db); 3254 } else if (blkid == DMU_SPILL_BLKID) { 3255 db->db.db_size = (blkptr != NULL) ? 3256 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 3257 db->db.db_offset = 0; 3258 } else { 3259 int blocksize = 3260 db->db_level ? 
1 << dn->dn_indblkshift : dn->dn_datablksz; 3261 db->db.db_size = blocksize; 3262 db->db.db_offset = db->db_blkid * blocksize; 3263 } 3264 3265 /* 3266 * Hold the dn_dbufs_mtx while we insert the new dbuf 3267 * into the hash table *and* add it to the dn_dbufs list. 3268 * This prevents a possible deadlock with someone 3269 * trying to look up this dbuf before it's added to the 3270 * dn_dbufs list. 3271 */ 3272 mutex_enter(&dn->dn_dbufs_mtx); 3273 db->db_state = DB_EVICTING; /* not worth logging this state change */ 3274 if ((odb = dbuf_hash_insert(db)) != NULL) { 3275 /* someone else inserted it first */ 3276 mutex_exit(&dn->dn_dbufs_mtx); 3277 kmem_cache_free(dbuf_kmem_cache, db); 3278 DBUF_STAT_BUMP(hash_insert_race); 3279 return (odb); 3280 } 3281 avl_add(&dn->dn_dbufs, db); 3282 3283 db->db_state = DB_UNCACHED; 3284 DTRACE_SET_STATE(db, "regular buffer created"); 3285 db->db_caching_status = DB_NO_CACHE; 3286 mutex_exit(&dn->dn_dbufs_mtx); 3287 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3288 3289 if (parent && parent != dn->dn_dbuf) 3290 dbuf_add_ref(parent, db); 3291 3292 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 3293 zfs_refcount_count(&dn->dn_holds) > 0); 3294 (void) zfs_refcount_add(&dn->dn_holds, db); 3295 3296 dprintf_dbuf(db, "db=%p\n", db); 3297 3298 return (db); 3299 } 3300 3301 /* 3302 * This function returns a block pointer and information about the object, 3303 * given a dnode and a block. This is a publicly accessible version of 3304 * dbuf_findbp that only returns some information, rather than the 3305 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock 3306 * should be locked as (at least) a reader. 3307 */ 3308 int 3309 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid, 3310 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift) 3311 { 3312 dmu_buf_impl_t *dbp = NULL; 3313 blkptr_t *bp2; 3314 int err = 0; 3315 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3316 3317 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2); 3318 if (err == 0) { 3319 ASSERT3P(bp2, !=, NULL); 3320 *bp = *bp2; 3321 if (dbp != NULL) 3322 dbuf_rele(dbp, NULL); 3323 if (datablkszsec != NULL) 3324 *datablkszsec = dn->dn_phys->dn_datablkszsec; 3325 if (indblkshift != NULL) 3326 *indblkshift = dn->dn_phys->dn_indblkshift; 3327 } 3328 3329 return (err); 3330 } 3331 3332 typedef struct dbuf_prefetch_arg { 3333 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 3334 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 3335 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 3336 int dpa_curlevel; /* The current level that we're reading */ 3337 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 3338 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 3339 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 3340 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch.
*/ 3341 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */ 3342 void *dpa_arg; /* prefetch completion arg */ 3343 } dbuf_prefetch_arg_t; 3344 3345 static void 3346 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done) 3347 { 3348 if (dpa->dpa_cb != NULL) { 3349 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level, 3350 dpa->dpa_zb.zb_blkid, io_done); 3351 } 3352 kmem_free(dpa, sizeof (*dpa)); 3353 } 3354 3355 static void 3356 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, 3357 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3358 { 3359 (void) zio, (void) zb, (void) iobp; 3360 dbuf_prefetch_arg_t *dpa = private; 3361 3362 if (abuf != NULL) 3363 arc_buf_destroy(abuf, private); 3364 3365 dbuf_prefetch_fini(dpa, B_TRUE); 3366 } 3367 3368 /* 3369 * Actually issue the prefetch read for the block given. 3370 */ 3371 static void 3372 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 3373 { 3374 ASSERT(!BP_IS_REDACTED(bp) || 3375 dsl_dataset_feature_is_active( 3376 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3377 SPA_FEATURE_REDACTED_DATASETS)); 3378 3379 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp)) 3380 return (dbuf_prefetch_fini(dpa, B_FALSE)); 3381 3382 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 3383 arc_flags_t aflags = 3384 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | 3385 ARC_FLAG_NO_BUF; 3386 3387 /* dnodes are always read as raw and then converted later */ 3388 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 3389 dpa->dpa_curlevel == 0) 3390 zio_flags |= ZIO_FLAG_RAW; 3391 3392 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3393 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 3394 ASSERT(dpa->dpa_zio != NULL); 3395 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, 3396 dbuf_issue_final_prefetch_done, dpa, 3397 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 3398 } 3399 3400 /* 3401 * Called when an indirect block above our prefetch target is read in. This 3402 * will either read in the next indirect block down the tree or issue the actual 3403 * prefetch if the next block down is our target. 3404 */ 3405 static void 3406 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 3407 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3408 { 3409 (void) zb, (void) iobp; 3410 dbuf_prefetch_arg_t *dpa = private; 3411 3412 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 3413 ASSERT3S(dpa->dpa_curlevel, >, 0); 3414 3415 if (abuf == NULL) { 3416 ASSERT(zio == NULL || zio->io_error != 0); 3417 dbuf_prefetch_fini(dpa, B_TRUE); 3418 return; 3419 } 3420 ASSERT(zio == NULL || zio->io_error == 0); 3421 3422 /* 3423 * The dpa_dnode is only valid if we are called with a NULL 3424 * zio. This indicates that the arc_read() returned without 3425 * first calling zio_read() to issue a physical read. Once 3426 * a physical read is made the dpa_dnode must be invalidated 3427 * as the locks guarding it may have been dropped. If the 3428 * dpa_dnode is still valid, then we want to add it to the dbuf 3429 * cache. To do so, we must hold the dbuf associated with the block 3430 * we just prefetched, read its contents so that we associate it 3431 * with an arc_buf_t, and then release it. 
3432 */ 3433 if (zio != NULL) { 3434 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 3435 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 3436 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 3437 } else { 3438 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 3439 } 3440 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 3441 3442 dpa->dpa_dnode = NULL; 3443 } else if (dpa->dpa_dnode != NULL) { 3444 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 3445 (dpa->dpa_epbs * (dpa->dpa_curlevel - 3446 dpa->dpa_zb.zb_level)); 3447 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 3448 dpa->dpa_curlevel, curblkid, FTAG); 3449 if (db == NULL) { 3450 arc_buf_destroy(abuf, private); 3451 dbuf_prefetch_fini(dpa, B_TRUE); 3452 return; 3453 } 3454 (void) dbuf_read(db, NULL, 3455 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 3456 dbuf_rele(db, FTAG); 3457 } 3458 3459 dpa->dpa_curlevel--; 3460 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 3461 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 3462 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 3463 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 3464 3465 ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode && 3466 dsl_dataset_feature_is_active( 3467 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3468 SPA_FEATURE_REDACTED_DATASETS))); 3469 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) { 3470 arc_buf_destroy(abuf, private); 3471 dbuf_prefetch_fini(dpa, B_TRUE); 3472 return; 3473 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 3474 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 3475 dbuf_issue_final_prefetch(dpa, bp); 3476 } else { 3477 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3478 zbookmark_phys_t zb; 3479 3480 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3481 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 3482 iter_aflags |= ARC_FLAG_L2CACHE; 3483 3484 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3485 3486 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 3487 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 3488 3489 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3490 bp, dbuf_prefetch_indirect_done, dpa, 3491 ZIO_PRIORITY_SYNC_READ, 3492 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3493 &iter_aflags, &zb); 3494 } 3495 3496 arc_buf_destroy(abuf, private); 3497 } 3498 3499 /* 3500 * Issue prefetch reads for the given block on the given level. If the indirect 3501 * blocks above that block are not in memory, we will read them in 3502 * asynchronously. As a result, this call never blocks waiting for a read to 3503 * complete. Note that the prefetch might fail if the dataset is encrypted and 3504 * the encryption key is unmapped before the IO completes. 3505 */ 3506 int 3507 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid, 3508 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb, 3509 void *arg) 3510 { 3511 blkptr_t bp; 3512 int epbs, nlevels, curlevel; 3513 uint64_t curblkid; 3514 3515 ASSERT(blkid != DMU_BONUS_BLKID); 3516 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3517 3518 if (blkid > dn->dn_maxblkid) 3519 goto no_issue; 3520 3521 if (level == 0 && dnode_block_freed(dn, blkid)) 3522 goto no_issue; 3523 3524 /* 3525 * This dnode hasn't been written to disk yet, so there's nothing to 3526 * prefetch. 
3527 */ 3528 nlevels = dn->dn_phys->dn_nlevels; 3529 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 3530 goto no_issue; 3531 3532 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3533 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 3534 goto no_issue; 3535 3536 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 3537 level, blkid, NULL); 3538 if (db != NULL) { 3539 mutex_exit(&db->db_mtx); 3540 /* 3541 * This dbuf already exists. It is either CACHED, or 3542 * (we assume) about to be read or filled. 3543 */ 3544 goto no_issue; 3545 } 3546 3547 /* 3548 * Find the closest ancestor (indirect block) of the target block 3549 * that is present in the cache. In this indirect block, we will 3550 * find the bp that is at curlevel, curblkid. 3551 */ 3552 curlevel = level; 3553 curblkid = blkid; 3554 while (curlevel < nlevels - 1) { 3555 int parent_level = curlevel + 1; 3556 uint64_t parent_blkid = curblkid >> epbs; 3557 dmu_buf_impl_t *db; 3558 3559 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 3560 FALSE, TRUE, FTAG, &db) == 0) { 3561 blkptr_t *bpp = db->db_buf->b_data; 3562 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 3563 dbuf_rele(db, FTAG); 3564 break; 3565 } 3566 3567 curlevel = parent_level; 3568 curblkid = parent_blkid; 3569 } 3570 3571 if (curlevel == nlevels - 1) { 3572 /* No cached indirect blocks found. */ 3573 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 3574 bp = dn->dn_phys->dn_blkptr[curblkid]; 3575 } 3576 ASSERT(!BP_IS_REDACTED(&bp) || 3577 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset, 3578 SPA_FEATURE_REDACTED_DATASETS)); 3579 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp)) 3580 goto no_issue; 3581 3582 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 3583 3584 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 3585 ZIO_FLAG_CANFAIL); 3586 3587 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 3588 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 3589 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 3590 dn->dn_object, level, blkid); 3591 dpa->dpa_curlevel = curlevel; 3592 dpa->dpa_prio = prio; 3593 dpa->dpa_aflags = aflags; 3594 dpa->dpa_spa = dn->dn_objset->os_spa; 3595 dpa->dpa_dnode = dn; 3596 dpa->dpa_epbs = epbs; 3597 dpa->dpa_zio = pio; 3598 dpa->dpa_cb = cb; 3599 dpa->dpa_arg = arg; 3600 3601 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level)) 3602 dpa->dpa_aflags |= ARC_FLAG_UNCACHED; 3603 else if (dnode_level_is_l2cacheable(&bp, dn, level)) 3604 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 3605 3606 /* 3607 * If we have the indirect just above us, no need to do the asynchronous 3608 * prefetch chain; we'll just run the last step ourselves. If we're at 3609 * a higher level, though, we want to issue the prefetches for all the 3610 * indirect blocks asynchronously, so we can go on with whatever we were 3611 * doing. 3612 */ 3613 if (curlevel == level) { 3614 ASSERT3U(curblkid, ==, blkid); 3615 dbuf_issue_final_prefetch(dpa, &bp); 3616 } else { 3617 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3618 zbookmark_phys_t zb; 3619 3620 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3621 if (dnode_level_is_l2cacheable(&bp, dn, level)) 3622 iter_aflags |= ARC_FLAG_L2CACHE; 3623 3624 SET_BOOKMARK(&zb, ds != NULL ? 
ds->ds_object : DMU_META_OBJSET, 3625 dn->dn_object, curlevel, curblkid); 3626 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3627 &bp, dbuf_prefetch_indirect_done, dpa, 3628 ZIO_PRIORITY_SYNC_READ, 3629 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3630 &iter_aflags, &zb); 3631 } 3632 /* 3633 * We use pio here instead of dpa_zio since it's possible that 3634 * dpa may have already been freed. 3635 */ 3636 zio_nowait(pio); 3637 return (1); 3638 no_issue: 3639 if (cb != NULL) 3640 cb(arg, level, blkid, B_FALSE); 3641 return (0); 3642 } 3643 3644 int 3645 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 3646 arc_flags_t aflags) 3647 { 3648 3649 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL)); 3650 } 3651 3652 /* 3653 * Helper function for dbuf_hold_impl() to copy a buffer. Handles 3654 * the case of encrypted, compressed and uncompressed buffers by 3655 * allocating the new buffer, respectively, with arc_alloc_raw_buf(), 3656 * arc_alloc_compressed_buf() or arc_alloc_buf(). 3657 * 3658 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl(). 3659 */ 3660 noinline static void 3661 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db) 3662 { 3663 dbuf_dirty_record_t *dr = db->db_data_pending; 3664 arc_buf_t *data = dr->dt.dl.dr_data; 3665 enum zio_compress compress_type = arc_get_compression(data); 3666 uint8_t complevel = arc_get_complevel(data); 3667 3668 if (arc_is_encrypted(data)) { 3669 boolean_t byteorder; 3670 uint8_t salt[ZIO_DATA_SALT_LEN]; 3671 uint8_t iv[ZIO_DATA_IV_LEN]; 3672 uint8_t mac[ZIO_DATA_MAC_LEN]; 3673 3674 arc_get_raw_params(data, &byteorder, salt, iv, mac); 3675 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db, 3676 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, 3677 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), 3678 compress_type, complevel)); 3679 } else if (compress_type != ZIO_COMPRESS_OFF) { 3680 dbuf_set_data(db, arc_alloc_compressed_buf( 3681 dn->dn_objset->os_spa, db, arc_buf_size(data), 3682 arc_buf_lsize(data), compress_type, complevel)); 3683 } else { 3684 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db, 3685 DBUF_GET_BUFC_TYPE(db), db->db.db_size)); 3686 } 3687 3688 rw_enter(&db->db_rwlock, RW_WRITER); 3689 memcpy(db->db.db_data, data->b_data, arc_buf_size(data)); 3690 rw_exit(&db->db_rwlock); 3691 } 3692 3693 /* 3694 * Returns with db_holds incremented, and db_mtx not held. 3695 * Note: dn_struct_rwlock must be held.
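 *
 * A minimal calling sketch (illustrative only, not part of this file's
 * code paths; it assumes the caller already holds the dnode and that
 * "blkid" names a hypothetical level-0 block):
 *
 *	dmu_buf_impl_t *db;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}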
3696 */ 3697 int 3698 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 3699 boolean_t fail_sparse, boolean_t fail_uncached, 3700 const void *tag, dmu_buf_impl_t **dbp) 3701 { 3702 dmu_buf_impl_t *db, *parent = NULL; 3703 uint64_t hv; 3704 3705 /* If the pool has been created, verify the tx_sync_lock is not held */ 3706 spa_t *spa = dn->dn_objset->os_spa; 3707 dsl_pool_t *dp = spa->spa_dsl_pool; 3708 if (dp != NULL) { 3709 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock)); 3710 } 3711 3712 ASSERT(blkid != DMU_BONUS_BLKID); 3713 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3714 ASSERT3U(dn->dn_nlevels, >, level); 3715 3716 *dbp = NULL; 3717 3718 /* dbuf_find() returns with db_mtx held */ 3719 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv); 3720 3721 if (db == NULL) { 3722 blkptr_t *bp = NULL; 3723 int err; 3724 3725 if (fail_uncached) 3726 return (SET_ERROR(ENOENT)); 3727 3728 ASSERT3P(parent, ==, NULL); 3729 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 3730 if (fail_sparse) { 3731 if (err == 0 && bp && BP_IS_HOLE(bp)) 3732 err = SET_ERROR(ENOENT); 3733 if (err) { 3734 if (parent) 3735 dbuf_rele(parent, NULL); 3736 return (err); 3737 } 3738 } 3739 if (err && err != ENOENT) 3740 return (err); 3741 db = dbuf_create(dn, level, blkid, parent, bp, hv); 3742 } 3743 3744 if (fail_uncached && db->db_state != DB_CACHED) { 3745 mutex_exit(&db->db_mtx); 3746 return (SET_ERROR(ENOENT)); 3747 } 3748 3749 if (db->db_buf != NULL) { 3750 arc_buf_access(db->db_buf); 3751 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 3752 } 3753 3754 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 3755 3756 /* 3757 * If this buffer is currently syncing out, and we are 3758 * still referencing it from db_data, we need to make a copy 3759 * of it in case we decide we want to dirty it again in this txg. 
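 *
 * Concretely (an illustrative scenario): if txg N is syncing this
 * dbuf's contents out of db_buf while a hold taken in open txg N+1
 * dirties the same block, the open txg must be given a private copy
 * (via dbuf_hold_copy() below) so that the in-flight write for txg N
 * never observes txg N+1's modifications.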
3760 */ 3761 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 3762 dn->dn_object != DMU_META_DNODE_OBJECT && 3763 db->db_state == DB_CACHED && db->db_data_pending) { 3764 dbuf_dirty_record_t *dr = db->db_data_pending; 3765 if (dr->dt.dl.dr_data == db->db_buf) { 3766 ASSERT3P(db->db_buf, !=, NULL); 3767 dbuf_hold_copy(dn, db); 3768 } 3769 } 3770 3771 if (multilist_link_active(&db->db_cache_link)) { 3772 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3773 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 3774 db->db_caching_status == DB_DBUF_METADATA_CACHE); 3775 3776 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 3777 3778 uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db); 3779 (void) zfs_refcount_remove_many( 3780 &dbuf_caches[db->db_caching_status].size, size, db); 3781 3782 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 3783 DBUF_STAT_BUMPDOWN(metadata_cache_count); 3784 } else { 3785 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 3786 DBUF_STAT_BUMPDOWN(cache_count); 3787 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size); 3788 } 3789 db->db_caching_status = DB_NO_CACHE; 3790 } 3791 (void) zfs_refcount_add(&db->db_holds, tag); 3792 DBUF_VERIFY(db); 3793 mutex_exit(&db->db_mtx); 3794 3795 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 3796 if (parent) 3797 dbuf_rele(parent, NULL); 3798 3799 ASSERT3P(DB_DNODE(db), ==, dn); 3800 ASSERT3U(db->db_blkid, ==, blkid); 3801 ASSERT3U(db->db_level, ==, level); 3802 *dbp = db; 3803 3804 return (0); 3805 } 3806 3807 dmu_buf_impl_t * 3808 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag) 3809 { 3810 return (dbuf_hold_level(dn, 0, blkid, tag)); 3811 } 3812 3813 dmu_buf_impl_t * 3814 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag) 3815 { 3816 dmu_buf_impl_t *db; 3817 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 3818 return (err ? 
NULL : db); 3819 } 3820 3821 void 3822 dbuf_create_bonus(dnode_t *dn) 3823 { 3824 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 3825 3826 ASSERT(dn->dn_bonus == NULL); 3827 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL, 3828 dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID)); 3829 } 3830 3831 int 3832 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 3833 { 3834 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3835 3836 if (db->db_blkid != DMU_SPILL_BLKID) 3837 return (SET_ERROR(ENOTSUP)); 3838 if (blksz == 0) 3839 blksz = SPA_MINBLOCKSIZE; 3840 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 3841 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 3842 3843 dbuf_new_size(db, blksz, tx); 3844 3845 return (0); 3846 } 3847 3848 void 3849 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 3850 { 3851 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 3852 } 3853 3854 #pragma weak dmu_buf_add_ref = dbuf_add_ref 3855 void 3856 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag) 3857 { 3858 int64_t holds = zfs_refcount_add(&db->db_holds, tag); 3859 VERIFY3S(holds, >, 1); 3860 } 3861 3862 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 3863 boolean_t 3864 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 3865 const void *tag) 3866 { 3867 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3868 dmu_buf_impl_t *found_db; 3869 boolean_t result = B_FALSE; 3870 3871 if (blkid == DMU_BONUS_BLKID) 3872 found_db = dbuf_find_bonus(os, obj); 3873 else 3874 found_db = dbuf_find(os, obj, 0, blkid, NULL); 3875 3876 if (found_db != NULL) { 3877 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 3878 (void) zfs_refcount_add(&db->db_holds, tag); 3879 result = B_TRUE; 3880 } 3881 mutex_exit(&found_db->db_mtx); 3882 } 3883 return (result); 3884 } 3885 3886 /* 3887 * If you call dbuf_rele() you had better not be referencing the dnode handle 3888 * unless you have some other direct or indirect hold on the dnode. (An indirect 3889 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 3890 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 3891 * dnode's parent dbuf evicting its dnode handles. 3892 */ 3893 void 3894 dbuf_rele(dmu_buf_impl_t *db, const void *tag) 3895 { 3896 mutex_enter(&db->db_mtx); 3897 dbuf_rele_and_unlock(db, tag, B_FALSE); 3898 } 3899 3900 void 3901 dmu_buf_rele(dmu_buf_t *db, const void *tag) 3902 { 3903 dbuf_rele((dmu_buf_impl_t *)db, tag); 3904 } 3905 3906 /* 3907 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 3908 * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 3909 * argument should be set if we are already in the dbuf-evicting code 3910 * path, in which case we don't want to recursively evict. This allows us to 3911 * avoid deeply nested stacks that would have a call flow similar to this: 3912 * 3913 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 3914 * ^ | 3915 * | | 3916 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 3917 * 3918 */ 3919 void 3920 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting) 3921 { 3922 int64_t holds; 3923 uint64_t size; 3924 3925 ASSERT(MUTEX_HELD(&db->db_mtx)); 3926 DBUF_VERIFY(db); 3927 3928 /* 3929 * Remove the reference to the dbuf before removing its hold on the 3930 * dnode so we can guarantee in dnode_move() that a referenced bonus 3931 * buffer has a corresponding dnode hold. 
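 *
 * In other words (descriptive note): the zfs_refcount_remove() below
 * must precede the dnode_rele() issued in the bonus-buffer case
 * further down; the reverse order would create a window in which
 * dnode_move() could see a held bonus buffer without a matching
 * dnode hold.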
3932 */ 3933 holds = zfs_refcount_remove(&db->db_holds, tag); 3934 ASSERT(holds >= 0); 3935 3936 /* 3937 * We can't freeze indirects if there is a possibility that they 3938 * may be modified in the current syncing context. 3939 */ 3940 if (db->db_buf != NULL && 3941 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 3942 arc_buf_freeze(db->db_buf); 3943 } 3944 3945 if (holds == db->db_dirtycnt && 3946 db->db_level == 0 && db->db_user_immediate_evict) 3947 dbuf_evict_user(db); 3948 3949 if (holds == 0) { 3950 if (db->db_blkid == DMU_BONUS_BLKID) { 3951 dnode_t *dn; 3952 boolean_t evict_dbuf = db->db_pending_evict; 3953 3954 /* 3955 * If the dnode moves here, we cannot cross this 3956 * barrier until the move completes. 3957 */ 3958 DB_DNODE_ENTER(db); 3959 3960 dn = DB_DNODE(db); 3961 atomic_dec_32(&dn->dn_dbufs_count); 3962 3963 /* 3964 * Decrementing the dbuf count means that the bonus 3965 * buffer's dnode hold is no longer discounted in 3966 * dnode_move(). The dnode cannot move until after 3967 * the dnode_rele() below. 3968 */ 3969 DB_DNODE_EXIT(db); 3970 3971 /* 3972 * Do not reference db after its lock is dropped. 3973 * Another thread may evict it. 3974 */ 3975 mutex_exit(&db->db_mtx); 3976 3977 if (evict_dbuf) 3978 dnode_evict_bonus(dn); 3979 3980 dnode_rele(dn, db); 3981 } else if (db->db_buf == NULL) { 3982 /* 3983 * This is a special case: we never associated this 3984 * dbuf with any data allocated from the ARC. 3985 */ 3986 ASSERT(db->db_state == DB_UNCACHED || 3987 db->db_state == DB_NOFILL); 3988 dbuf_destroy(db); 3989 } else if (arc_released(db->db_buf)) { 3990 /* 3991 * This dbuf has anonymous data associated with it. 3992 */ 3993 dbuf_destroy(db); 3994 } else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) || 3995 db->db_pending_evict) { 3996 dbuf_destroy(db); 3997 } else if (!multilist_link_active(&db->db_cache_link)) { 3998 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 3999 4000 dbuf_cached_state_t dcs = 4001 dbuf_include_in_metadata_cache(db) ? 
4002 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 4003 db->db_caching_status = dcs; 4004 4005 multilist_insert(&dbuf_caches[dcs].cache, db); 4006 uint64_t db_size = db->db.db_size + 4007 dmu_buf_user_size(&db->db); 4008 size = zfs_refcount_add_many( 4009 &dbuf_caches[dcs].size, db_size, db); 4010 uint8_t db_level = db->db_level; 4011 mutex_exit(&db->db_mtx); 4012 4013 if (dcs == DB_DBUF_METADATA_CACHE) { 4014 DBUF_STAT_BUMP(metadata_cache_count); 4015 DBUF_STAT_MAX(metadata_cache_size_bytes_max, 4016 size); 4017 } else { 4018 DBUF_STAT_BUMP(cache_count); 4019 DBUF_STAT_MAX(cache_size_bytes_max, size); 4020 DBUF_STAT_BUMP(cache_levels[db_level]); 4021 DBUF_STAT_INCR(cache_levels_bytes[db_level], 4022 db_size); 4023 } 4024 4025 if (dcs == DB_DBUF_CACHE && !evicting) 4026 dbuf_evict_notify(size); 4027 } 4028 } else { 4029 mutex_exit(&db->db_mtx); 4030 } 4031 4032 } 4033 4034 #pragma weak dmu_buf_refcount = dbuf_refcount 4035 uint64_t 4036 dbuf_refcount(dmu_buf_impl_t *db) 4037 { 4038 return (zfs_refcount_count(&db->db_holds)); 4039 } 4040 4041 uint64_t 4042 dmu_buf_user_refcount(dmu_buf_t *db_fake) 4043 { 4044 uint64_t holds; 4045 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4046 4047 mutex_enter(&db->db_mtx); 4048 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); 4049 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; 4050 mutex_exit(&db->db_mtx); 4051 4052 return (holds); 4053 } 4054 4055 void * 4056 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 4057 dmu_buf_user_t *new_user) 4058 { 4059 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4060 4061 mutex_enter(&db->db_mtx); 4062 dbuf_verify_user(db, DBVU_NOT_EVICTING); 4063 if (db->db_user == old_user) 4064 db->db_user = new_user; 4065 else 4066 old_user = db->db_user; 4067 dbuf_verify_user(db, DBVU_NOT_EVICTING); 4068 mutex_exit(&db->db_mtx); 4069 4070 return (old_user); 4071 } 4072 4073 void * 4074 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 4075 { 4076 return (dmu_buf_replace_user(db_fake, NULL, user)); 4077 } 4078 4079 void * 4080 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 4081 { 4082 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4083 4084 db->db_user_immediate_evict = TRUE; 4085 return (dmu_buf_set_user(db_fake, user)); 4086 } 4087 4088 void * 4089 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 4090 { 4091 return (dmu_buf_replace_user(db_fake, user, NULL)); 4092 } 4093 4094 void * 4095 dmu_buf_get_user(dmu_buf_t *db_fake) 4096 { 4097 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4098 4099 dbuf_verify_user(db, DBVU_NOT_EVICTING); 4100 return (db->db_user); 4101 } 4102 4103 uint64_t 4104 dmu_buf_user_size(dmu_buf_t *db_fake) 4105 { 4106 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4107 if (db->db_user == NULL) 4108 return (0); 4109 return (atomic_load_64(&db->db_user->dbu_size)); 4110 } 4111 4112 void 4113 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd) 4114 { 4115 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4116 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 4117 ASSERT3P(db->db_user, !=, NULL); 4118 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd); 4119 atomic_add_64(&db->db_user->dbu_size, nadd); 4120 } 4121 4122 void 4123 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub) 4124 { 4125 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4126 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 4127 ASSERT3P(db->db_user, !=, NULL); 4128 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub); 4129 
atomic_sub_64(&db->db_user->dbu_size, nsub); 4130 } 4131 4132 void 4133 dmu_buf_user_evict_wait(void) 4134 { 4135 taskq_wait(dbu_evict_taskq); 4136 } 4137 4138 blkptr_t * 4139 dmu_buf_get_blkptr(dmu_buf_t *db) 4140 { 4141 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4142 return (dbi->db_blkptr); 4143 } 4144 4145 objset_t * 4146 dmu_buf_get_objset(dmu_buf_t *db) 4147 { 4148 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4149 return (dbi->db_objset); 4150 } 4151 4152 dnode_t * 4153 dmu_buf_dnode_enter(dmu_buf_t *db) 4154 { 4155 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4156 DB_DNODE_ENTER(dbi); 4157 return (DB_DNODE(dbi)); 4158 } 4159 4160 void 4161 dmu_buf_dnode_exit(dmu_buf_t *db) 4162 { 4163 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4164 DB_DNODE_EXIT(dbi); 4165 } 4166 4167 static void 4168 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 4169 { 4170 /* ASSERT(dmu_tx_is_syncing(tx)) */ 4171 ASSERT(MUTEX_HELD(&db->db_mtx)); 4172 4173 if (db->db_blkptr != NULL) 4174 return; 4175 4176 if (db->db_blkid == DMU_SPILL_BLKID) { 4177 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 4178 BP_ZERO(db->db_blkptr); 4179 return; 4180 } 4181 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 4182 /* 4183 * This buffer was allocated at a time when there were 4184 * no blkptrs available from the dnode, or it was 4185 * inappropriate to hook it in (i.e., nlevels mismatch). 4186 */ 4187 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 4188 ASSERT(db->db_parent == NULL); 4189 db->db_parent = dn->dn_dbuf; 4190 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 4191 DBUF_VERIFY(db); 4192 } else { 4193 dmu_buf_impl_t *parent = db->db_parent; 4194 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 4195 4196 ASSERT(dn->dn_phys->dn_nlevels > 1); 4197 if (parent == NULL) { 4198 mutex_exit(&db->db_mtx); 4199 rw_enter(&dn->dn_struct_rwlock, RW_READER); 4200 parent = dbuf_hold_level(dn, db->db_level + 1, 4201 db->db_blkid >> epbs, db); 4202 rw_exit(&dn->dn_struct_rwlock); 4203 mutex_enter(&db->db_mtx); 4204 db->db_parent = parent; 4205 } 4206 db->db_blkptr = (blkptr_t *)parent->db.db_data + 4207 (db->db_blkid & ((1ULL << epbs) - 1)); 4208 DBUF_VERIFY(db); 4209 } 4210 } 4211 4212 static void 4213 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4214 { 4215 dmu_buf_impl_t *db = dr->dr_dbuf; 4216 void *data = dr->dt.dl.dr_data; 4217 4218 ASSERT0(db->db_level); 4219 ASSERT(MUTEX_HELD(&db->db_mtx)); 4220 ASSERT(db->db_blkid == DMU_BONUS_BLKID); 4221 ASSERT(data != NULL); 4222 4223 dnode_t *dn = dr->dr_dnode; 4224 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 4225 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 4226 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys)); 4227 4228 dbuf_sync_leaf_verify_bonus_dnode(dr); 4229 4230 dbuf_undirty_bonus(dr); 4231 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 4232 } 4233 4234 /* 4235 * When syncing out a block of dnodes, adjust the block to deal with 4236 * encryption. Normally, we make sure the block is decrypted before writing 4237 * it. If we have crypt params, then we are writing a raw (encrypted) block, 4238 * from a raw receive. In this case, set the ARC buf's crypt params so 4239 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
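 *
 * Sketch of the two cases handled below (descriptive only):
 *
 *	- normal sync of an encrypted dataset: arc_untransform()
 *	  decrypts the dnode block in place before it is written;
 *	- raw receive (dr_has_raw_params set): the buffer is converted
 *	  to a raw ARC buffer so that the saved byteorder, salt, IV,
 *	  and MAC are carried into the BP.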
4240 */ 4241 static void 4242 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr) 4243 { 4244 int err; 4245 dmu_buf_impl_t *db = dr->dr_dbuf; 4246 4247 ASSERT(MUTEX_HELD(&db->db_mtx)); 4248 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 4249 ASSERT3U(db->db_level, ==, 0); 4250 4251 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { 4252 zbookmark_phys_t zb; 4253 4254 /* 4255 * Unfortunately, there is currently no mechanism for 4256 * syncing context to handle decryption errors. An error 4257 * here is only possible if an attacker maliciously 4258 * changed a dnode block and updated the associated 4259 * checksums going up the block tree. 4260 */ 4261 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 4262 db->db.db_object, db->db_level, db->db_blkid); 4263 err = arc_untransform(db->db_buf, db->db_objset->os_spa, 4264 &zb, B_TRUE); 4265 if (err) 4266 panic("Invalid dnode block MAC"); 4267 } else if (dr->dt.dl.dr_has_raw_params) { 4268 (void) arc_release(dr->dt.dl.dr_data, db); 4269 arc_convert_to_raw(dr->dt.dl.dr_data, 4270 dmu_objset_id(db->db_objset), 4271 dr->dt.dl.dr_byteorder, DMU_OT_DNODE, 4272 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac); 4273 } 4274 } 4275 4276 /* 4277 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it 4278 * is critical that we not allow the compiler to inline this function into 4279 * dbuf_sync_list(), thereby drastically bloating the stack usage. 4280 */ 4281 noinline static void 4282 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4283 { 4284 dmu_buf_impl_t *db = dr->dr_dbuf; 4285 dnode_t *dn = dr->dr_dnode; 4286 4287 ASSERT(dmu_tx_is_syncing(tx)); 4288 4289 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 4290 4291 mutex_enter(&db->db_mtx); 4292 4293 ASSERT(db->db_level > 0); 4294 DBUF_VERIFY(db); 4295 4296 /* Read the block if it hasn't been read yet. */ 4297 if (db->db_buf == NULL) { 4298 mutex_exit(&db->db_mtx); 4299 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 4300 mutex_enter(&db->db_mtx); 4301 } 4302 ASSERT3U(db->db_state, ==, DB_CACHED); 4303 ASSERT(db->db_buf != NULL); 4304 4305 /* Indirect block size must match what the dnode thinks it is. */ 4306 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 4307 dbuf_check_blkptr(dn, db); 4308 4309 /* Provide the pending dirty record to child dbufs */ 4310 db->db_data_pending = dr; 4311 4312 mutex_exit(&db->db_mtx); 4313 4314 dbuf_write(dr, db->db_buf, tx); 4315 4316 zio_t *zio = dr->dr_zio; 4317 mutex_enter(&dr->dt.di.dr_mtx); 4318 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 4319 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 4320 mutex_exit(&dr->dt.di.dr_mtx); 4321 zio_nowait(zio); 4322 } 4323 4324 /* 4325 * Verify that the size of the data in our bonus buffer does not exceed 4326 * its recorded size. 4327 * 4328 * The purpose of this verification is to catch any cases in development 4329 * where the size of a phys structure (i.e., space_map_phys_t) grows and, 4330 * due to incorrect feature management, older pools expect to read more 4331 * data even though they didn't actually write it to begin with. 4332 * 4333 * For example, this would catch an error in the feature logic where we 4334 * open an older pool and we expect to write the space map histogram of 4335 * a space map with size SPACE_MAP_SIZE_V0.
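 *
 * Informally, the check below amounts to this sketch (bonus_data is a
 * hypothetical name for the buffer being verified):
 *
 *	for (off = dn_bonuslen; off < maxbonuslen; off++)
 *		ASSERT(bonus_data[off] == 0);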
4336 */ 4337 static void 4338 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr) 4339 { 4340 #ifdef ZFS_DEBUG 4341 dnode_t *dn = dr->dr_dnode; 4342 4343 /* 4344 * Encrypted bonus buffers can have data past their bonuslen. 4345 * Skip the verification of these blocks. 4346 */ 4347 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)) 4348 return; 4349 4350 uint16_t bonuslen = dn->dn_phys->dn_bonuslen; 4351 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 4352 ASSERT3U(bonuslen, <=, maxbonuslen); 4353 4354 arc_buf_t *datap = dr->dt.dl.dr_data; 4355 char *datap_end = ((char *)datap) + bonuslen; 4356 char *datap_max = ((char *)datap) + maxbonuslen; 4357 4358 /* ensure that everything is zero after our data */ 4359 for (; datap_end < datap_max; datap_end++) 4360 ASSERT(*datap_end == 0); 4361 #endif 4362 } 4363 4364 static blkptr_t * 4365 dbuf_lightweight_bp(dbuf_dirty_record_t *dr) 4366 { 4367 /* This must be a lightweight dirty record. */ 4368 ASSERT3P(dr->dr_dbuf, ==, NULL); 4369 dnode_t *dn = dr->dr_dnode; 4370 4371 if (dn->dn_phys->dn_nlevels == 1) { 4372 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr); 4373 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]); 4374 } else { 4375 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf; 4376 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 4377 VERIFY3U(parent_db->db_level, ==, 1); 4378 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn); 4379 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid); 4380 blkptr_t *bp = parent_db->db.db_data; 4381 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]); 4382 } 4383 } 4384 4385 static void 4386 dbuf_lightweight_ready(zio_t *zio) 4387 { 4388 dbuf_dirty_record_t *dr = zio->io_private; 4389 blkptr_t *bp = zio->io_bp; 4390 4391 if (zio->io_error != 0) 4392 return; 4393 4394 dnode_t *dn = dr->dr_dnode; 4395 4396 blkptr_t *bp_orig = dbuf_lightweight_bp(dr); 4397 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4398 int64_t delta = bp_get_dsize_sync(spa, bp) - 4399 bp_get_dsize_sync(spa, bp_orig); 4400 dnode_diduse_space(dn, delta); 4401 4402 uint64_t blkid = dr->dt.dll.dr_blkid; 4403 mutex_enter(&dn->dn_mtx); 4404 if (blkid > dn->dn_phys->dn_maxblkid) { 4405 ASSERT0(dn->dn_objset->os_raw_receive); 4406 dn->dn_phys->dn_maxblkid = blkid; 4407 } 4408 mutex_exit(&dn->dn_mtx); 4409 4410 if (!BP_IS_EMBEDDED(bp)) { 4411 uint64_t fill = BP_IS_HOLE(bp) ? 
0 : 1; 4412 BP_SET_FILL(bp, fill); 4413 } 4414 4415 dmu_buf_impl_t *parent_db; 4416 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1); 4417 if (dr->dr_parent == NULL) { 4418 parent_db = dn->dn_dbuf; 4419 } else { 4420 parent_db = dr->dr_parent->dr_dbuf; 4421 } 4422 rw_enter(&parent_db->db_rwlock, RW_WRITER); 4423 *bp_orig = *bp; 4424 rw_exit(&parent_db->db_rwlock); 4425 } 4426 4427 static void 4428 dbuf_lightweight_done(zio_t *zio) 4429 { 4430 dbuf_dirty_record_t *dr = zio->io_private; 4431 4432 VERIFY0(zio->io_error); 4433 4434 objset_t *os = dr->dr_dnode->dn_objset; 4435 dmu_tx_t *tx = os->os_synctx; 4436 4437 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 4438 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); 4439 } else { 4440 dsl_dataset_t *ds = os->os_dsl_dataset; 4441 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE); 4442 dsl_dataset_block_born(ds, zio->io_bp, tx); 4443 } 4444 4445 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, 4446 zio->io_txg); 4447 4448 abd_free(dr->dt.dll.dr_abd); 4449 kmem_free(dr, sizeof (*dr)); 4450 } 4451 4452 noinline static void 4453 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4454 { 4455 dnode_t *dn = dr->dr_dnode; 4456 zio_t *pio; 4457 if (dn->dn_phys->dn_nlevels == 1) { 4458 pio = dn->dn_zio; 4459 } else { 4460 pio = dr->dr_parent->dr_zio; 4461 } 4462 4463 zbookmark_phys_t zb = { 4464 .zb_objset = dmu_objset_id(dn->dn_objset), 4465 .zb_object = dn->dn_object, 4466 .zb_level = 0, 4467 .zb_blkid = dr->dt.dll.dr_blkid, 4468 }; 4469 4470 /* 4471 * See comment in dbuf_write(). This is so that zio->io_bp_orig 4472 * will have the old BP in dbuf_lightweight_done(). 4473 */ 4474 dr->dr_bp_copy = *dbuf_lightweight_bp(dr); 4475 4476 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset), 4477 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd, 4478 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd), 4479 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL, 4480 dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE, 4481 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb); 4482 4483 zio_nowait(dr->dr_zio); 4484 } 4485 4486 /* 4487 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is 4488 * critical that we not allow the compiler to inline this function into 4489 * dbuf_sync_list(), thereby drastically bloating the stack usage. 4490 */ 4491 noinline static void 4492 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4493 { 4494 arc_buf_t **datap = &dr->dt.dl.dr_data; 4495 dmu_buf_impl_t *db = dr->dr_dbuf; 4496 dnode_t *dn = dr->dr_dnode; 4497 objset_t *os; 4498 uint64_t txg = tx->tx_txg; 4499 4500 ASSERT(dmu_tx_is_syncing(tx)); 4501 4502 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 4503 4504 mutex_enter(&db->db_mtx); 4505 /* 4506 * To be synced, we must be dirtied. But we 4507 * might have been freed after the dirty. 4508 */ 4509 if (db->db_state == DB_UNCACHED) { 4510 /* This buffer has been freed since it was dirtied */ 4511 ASSERT(db->db.db_data == NULL); 4512 } else if (db->db_state == DB_FILL) { 4513 /* This buffer was freed and is now being re-filled */ 4514 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 4515 } else if (db->db_state == DB_READ) { 4516 /* 4517 * This buffer has a clone we need to write, and an in-flight 4518 * read on the BP we're about to clone. It's safe to issue the 4519 * write here because the read has already been issued and the 4520 * contents won't change.
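 *
 * (Descriptive note: such dirty records are created by block cloning,
 * see dmu_buf_will_clone(); the override BP already names the cloned
 * block, so no new data needs to be written out for this dbuf.)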
4521 */ 4522 ASSERT(dr->dt.dl.dr_brtwrite && 4523 dr->dt.dl.dr_override_state == DR_OVERRIDDEN); 4524 } else { 4525 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 4526 } 4527 DBUF_VERIFY(db); 4528 4529 if (db->db_blkid == DMU_SPILL_BLKID) { 4530 mutex_enter(&dn->dn_mtx); 4531 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { 4532 /* 4533 * In the previous transaction group, the bonus buffer 4534 * was entirely used to store the attributes for the 4535 * dnode which overrode the dn_spill field. However, 4536 * when adding more attributes to the file a spill 4537 * block was required to hold the extra attributes. 4538 * 4539 * Make sure to clear the garbage left in the dn_spill 4540 * field from the previous attributes in the bonus 4541 * buffer. Otherwise, after writing out the spill 4542 * block to the newly allocated dva, it will free 4543 * the old block pointed to by the invalid dn_spill. 4544 */ 4545 db->db_blkptr = NULL; 4546 } 4547 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 4548 mutex_exit(&dn->dn_mtx); 4549 } 4550 4551 /* 4552 * If this is a bonus buffer, simply copy the bonus data into the 4553 * dnode. It will be written out when the dnode is synced (and it 4554 * will be synced, since it must have been dirty for dbuf_sync to 4555 * be called). 4556 */ 4557 if (db->db_blkid == DMU_BONUS_BLKID) { 4558 ASSERT(dr->dr_dbuf == db); 4559 dbuf_sync_bonus(dr, tx); 4560 return; 4561 } 4562 4563 os = dn->dn_objset; 4564 4565 /* 4566 * This function may have dropped the db_mtx lock allowing a dmu_sync 4567 * operation to sneak in. As a result, we need to ensure that we 4568 * don't check the dr_override_state until we have returned from 4569 * dbuf_check_blkptr. 4570 */ 4571 dbuf_check_blkptr(dn, db); 4572 4573 /* 4574 * If this buffer is in the middle of an immediate write, 4575 * wait for the synchronous IO to complete. 4576 */ 4577 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 4578 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 4579 cv_wait(&db->db_changed, &db->db_mtx); 4580 } 4581 4582 /* 4583 * If this is a dnode block, ensure it is appropriately encrypted 4584 * or decrypted, depending on what we are writing to it this txg. 4585 */ 4586 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT) 4587 dbuf_prepare_encrypted_dnode_leaf(dr); 4588 4589 if (db->db_state != DB_NOFILL && 4590 dn->dn_object != DMU_META_DNODE_OBJECT && 4591 zfs_refcount_count(&db->db_holds) > 1 && 4592 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 4593 *datap == db->db_buf) { 4594 /* 4595 * If this buffer is currently "in use" (i.e., there 4596 * are active holds and db_data still references it), 4597 * then make a copy before we start the write so that 4598 * any modifications from the open txg will not leak 4599 * into this write. 4600 * 4601 * NOTE: this copy does not need to be made for 4602 * objects only modified in the syncing context (e.g. 4603 * DMU_OT_DNODE blocks).
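 *
 * Summary of the reallocation performed below (descriptive only):
 *
 *	encrypted buffer  -> arc_alloc_raw_buf(), preserving the
 *			     byteorder, salt, IV, and MAC
 *	compressed buffer -> arc_alloc_compressed_buf(), preserving
 *			     psize, lsize, and the compression type
 *	plain buffer	  -> arc_alloc_buf()
 *
 * followed by a memcpy() of the current contents into the new buffer,
 * which then becomes the copy handed to the write.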
4604 */ 4605 int psize = arc_buf_size(*datap); 4606 int lsize = arc_buf_lsize(*datap); 4607 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 4608 enum zio_compress compress_type = arc_get_compression(*datap); 4609 uint8_t complevel = arc_get_complevel(*datap); 4610 4611 if (arc_is_encrypted(*datap)) { 4612 boolean_t byteorder; 4613 uint8_t salt[ZIO_DATA_SALT_LEN]; 4614 uint8_t iv[ZIO_DATA_IV_LEN]; 4615 uint8_t mac[ZIO_DATA_MAC_LEN]; 4616 4617 arc_get_raw_params(*datap, &byteorder, salt, iv, mac); 4618 *datap = arc_alloc_raw_buf(os->os_spa, db, 4619 dmu_objset_id(os), byteorder, salt, iv, mac, 4620 dn->dn_type, psize, lsize, compress_type, 4621 complevel); 4622 } else if (compress_type != ZIO_COMPRESS_OFF) { 4623 ASSERT3U(type, ==, ARC_BUFC_DATA); 4624 *datap = arc_alloc_compressed_buf(os->os_spa, db, 4625 psize, lsize, compress_type, complevel); 4626 } else { 4627 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 4628 } 4629 memcpy((*datap)->b_data, db->db.db_data, psize); 4630 } 4631 db->db_data_pending = dr; 4632 4633 mutex_exit(&db->db_mtx); 4634 4635 dbuf_write(dr, *datap, tx); 4636 4637 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4638 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 4639 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr); 4640 } else { 4641 zio_nowait(dr->dr_zio); 4642 } 4643 } 4644 4645 /* 4646 * Syncs out a range of dirty records for indirect or leaf dbufs. May be 4647 * called recursively from dbuf_sync_indirect(). 4648 */ 4649 void 4650 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 4651 { 4652 dbuf_dirty_record_t *dr; 4653 4654 while ((dr = list_head(list))) { 4655 if (dr->dr_zio != NULL) { 4656 /* 4657 * If we find an already initialized zio then we 4658 * are processing the meta-dnode, and we have finished. 4659 * The dbufs for all dnodes are put back on the list 4660 * during processing, so that we can zio_wait() 4661 * these IOs after initiating all child IOs. 
4662 */ 4663 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 4664 DMU_META_DNODE_OBJECT); 4665 break; 4666 } 4667 list_remove(list, dr); 4668 if (dr->dr_dbuf == NULL) { 4669 dbuf_sync_lightweight(dr, tx); 4670 } else { 4671 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 4672 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 4673 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 4674 } 4675 if (dr->dr_dbuf->db_level > 0) 4676 dbuf_sync_indirect(dr, tx); 4677 else 4678 dbuf_sync_leaf(dr, tx); 4679 } 4680 } 4681 } 4682 4683 static void 4684 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 4685 { 4686 (void) buf; 4687 dmu_buf_impl_t *db = vdb; 4688 dnode_t *dn; 4689 blkptr_t *bp = zio->io_bp; 4690 blkptr_t *bp_orig = &zio->io_bp_orig; 4691 spa_t *spa = zio->io_spa; 4692 int64_t delta; 4693 uint64_t fill = 0; 4694 int i; 4695 4696 ASSERT3P(db->db_blkptr, !=, NULL); 4697 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 4698 4699 DB_DNODE_ENTER(db); 4700 dn = DB_DNODE(db); 4701 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 4702 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 4703 zio->io_prev_space_delta = delta; 4704 4705 if (bp->blk_birth != 0) { 4706 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 4707 BP_GET_TYPE(bp) == dn->dn_type) || 4708 (db->db_blkid == DMU_SPILL_BLKID && 4709 BP_GET_TYPE(bp) == dn->dn_bonustype) || 4710 BP_IS_EMBEDDED(bp)); 4711 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 4712 } 4713 4714 mutex_enter(&db->db_mtx); 4715 4716 #ifdef ZFS_DEBUG 4717 if (db->db_blkid == DMU_SPILL_BLKID) { 4718 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 4719 ASSERT(!(BP_IS_HOLE(bp)) && 4720 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 4721 } 4722 #endif 4723 4724 if (db->db_level == 0) { 4725 mutex_enter(&dn->dn_mtx); 4726 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 4727 db->db_blkid != DMU_SPILL_BLKID) { 4728 ASSERT0(db->db_objset->os_raw_receive); 4729 dn->dn_phys->dn_maxblkid = db->db_blkid; 4730 } 4731 mutex_exit(&dn->dn_mtx); 4732 4733 if (dn->dn_type == DMU_OT_DNODE) { 4734 i = 0; 4735 while (i < db->db.db_size) { 4736 dnode_phys_t *dnp = 4737 (void *)(((char *)db->db.db_data) + i); 4738 4739 i += DNODE_MIN_SIZE; 4740 if (dnp->dn_type != DMU_OT_NONE) { 4741 fill++; 4742 for (int j = 0; j < dnp->dn_nblkptr; 4743 j++) { 4744 (void) zfs_blkptr_verify(spa, 4745 &dnp->dn_blkptr[j], 4746 BLK_CONFIG_SKIP, 4747 BLK_VERIFY_HALT); 4748 } 4749 if (dnp->dn_flags & 4750 DNODE_FLAG_SPILL_BLKPTR) { 4751 (void) zfs_blkptr_verify(spa, 4752 DN_SPILL_BLKPTR(dnp), 4753 BLK_CONFIG_SKIP, 4754 BLK_VERIFY_HALT); 4755 } 4756 i += dnp->dn_extra_slots * 4757 DNODE_MIN_SIZE; 4758 } 4759 } 4760 } else { 4761 if (BP_IS_HOLE(bp)) { 4762 fill = 0; 4763 } else { 4764 fill = 1; 4765 } 4766 } 4767 } else { 4768 blkptr_t *ibp = db->db.db_data; 4769 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 4770 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 4771 if (BP_IS_HOLE(ibp)) 4772 continue; 4773 (void) zfs_blkptr_verify(spa, ibp, 4774 BLK_CONFIG_SKIP, BLK_VERIFY_HALT); 4775 fill += BP_GET_FILL(ibp); 4776 } 4777 } 4778 DB_DNODE_EXIT(db); 4779 4780 if (!BP_IS_EMBEDDED(bp)) 4781 BP_SET_FILL(bp, fill); 4782 4783 mutex_exit(&db->db_mtx); 4784 4785 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG); 4786 *db->db_blkptr = *bp; 4787 dmu_buf_unlock_parent(db, dblt, FTAG); 4788 } 4789 4790 /* 4791 * This function gets called just prior to running through the compression 4792 * stage of the zio pipeline. 
If we're an indirect block comprised of only 4793 * holes, then we want this indirect to be compressed away to a hole. In 4794 * order to do that we must zero out any information about the holes that 4795 * this indirect points to before we try to compress it. 4796 */ 4797 static void 4798 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 4799 { 4800 (void) zio, (void) buf; 4801 dmu_buf_impl_t *db = vdb; 4802 dnode_t *dn; 4803 blkptr_t *bp; 4804 unsigned int epbs, i; 4805 4806 ASSERT3U(db->db_level, >, 0); 4807 DB_DNODE_ENTER(db); 4808 dn = DB_DNODE(db); 4809 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 4810 ASSERT3U(epbs, <, 31); 4811 4812 /* Determine if all our children are holes */ 4813 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { 4814 if (!BP_IS_HOLE(bp)) 4815 break; 4816 } 4817 4818 /* 4819 * If all the children are holes, then zero them all out so that 4820 * we may get compressed away. 4821 */ 4822 if (i == 1ULL << epbs) { 4823 /* 4824 * We only found holes. Grab the rwlock to prevent 4825 * anybody from reading the blocks we're about to 4826 * zero out. 4827 */ 4828 rw_enter(&db->db_rwlock, RW_WRITER); 4829 memset(db->db.db_data, 0, db->db.db_size); 4830 rw_exit(&db->db_rwlock); 4831 } 4832 DB_DNODE_EXIT(db); 4833 } 4834 4835 static void 4836 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 4837 { 4838 (void) buf; 4839 dmu_buf_impl_t *db = vdb; 4840 blkptr_t *bp_orig = &zio->io_bp_orig; 4841 blkptr_t *bp = db->db_blkptr; 4842 objset_t *os = db->db_objset; 4843 dmu_tx_t *tx = os->os_synctx; 4844 4845 ASSERT0(zio->io_error); 4846 ASSERT(db->db_blkptr == bp); 4847 4848 /* 4849 * For nopwrites and rewrites we ensure that the bp matches our 4850 * original and bypass all the accounting. 4851 */ 4852 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 4853 ASSERT(BP_EQUAL(bp, bp_orig)); 4854 } else { 4855 dsl_dataset_t *ds = os->os_dsl_dataset; 4856 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 4857 dsl_dataset_block_born(ds, bp, tx); 4858 } 4859 4860 mutex_enter(&db->db_mtx); 4861 4862 DBUF_VERIFY(db); 4863 4864 dbuf_dirty_record_t *dr = db->db_data_pending; 4865 dnode_t *dn = dr->dr_dnode; 4866 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4867 ASSERT(dr->dr_dbuf == db); 4868 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 4869 list_remove(&db->db_dirty_records, dr); 4870 4871 #ifdef ZFS_DEBUG 4872 if (db->db_blkid == DMU_SPILL_BLKID) { 4873 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 4874 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 4875 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 4876 } 4877 #endif 4878 4879 if (db->db_level == 0) { 4880 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 4881 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 4882 if (db->db_state != DB_NOFILL) { 4883 if (dr->dt.dl.dr_data != NULL && 4884 dr->dt.dl.dr_data != db->db_buf) { 4885 arc_buf_destroy(dr->dt.dl.dr_data, db); 4886 } 4887 } 4888 } else { 4889 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 4890 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 4891 if (!BP_IS_HOLE(db->db_blkptr)) { 4892 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift - 4893 SPA_BLKPTRSHIFT; 4894 ASSERT3U(db->db_blkid, <=, 4895 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 4896 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 4897 db->db.db_size); 4898 } 4899 mutex_destroy(&dr->dt.di.dr_mtx); 4900 list_destroy(&dr->dt.di.dr_children); 4901 } 4902 4903 cv_broadcast(&db->db_changed); 4904 ASSERT(db->db_dirtycnt >
0); 4905 db->db_dirtycnt -= 1; 4906 db->db_data_pending = NULL; 4907 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 4908 4909 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, 4910 zio->io_txg); 4911 4912 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 4913 } 4914 4915 static void 4916 dbuf_write_nofill_ready(zio_t *zio) 4917 { 4918 dbuf_write_ready(zio, NULL, zio->io_private); 4919 } 4920 4921 static void 4922 dbuf_write_nofill_done(zio_t *zio) 4923 { 4924 dbuf_write_done(zio, NULL, zio->io_private); 4925 } 4926 4927 static void 4928 dbuf_write_override_ready(zio_t *zio) 4929 { 4930 dbuf_dirty_record_t *dr = zio->io_private; 4931 dmu_buf_impl_t *db = dr->dr_dbuf; 4932 4933 dbuf_write_ready(zio, NULL, db); 4934 } 4935 4936 static void 4937 dbuf_write_override_done(zio_t *zio) 4938 { 4939 dbuf_dirty_record_t *dr = zio->io_private; 4940 dmu_buf_impl_t *db = dr->dr_dbuf; 4941 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 4942 4943 mutex_enter(&db->db_mtx); 4944 if (!BP_EQUAL(zio->io_bp, obp)) { 4945 if (!BP_IS_HOLE(obp)) 4946 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 4947 arc_release(dr->dt.dl.dr_data, db); 4948 } 4949 mutex_exit(&db->db_mtx); 4950 4951 dbuf_write_done(zio, NULL, db); 4952 4953 if (zio->io_abd != NULL) 4954 abd_free(zio->io_abd); 4955 } 4956 4957 typedef struct dbuf_remap_impl_callback_arg { 4958 objset_t *drica_os; 4959 uint64_t drica_blk_birth; 4960 dmu_tx_t *drica_tx; 4961 } dbuf_remap_impl_callback_arg_t; 4962 4963 static void 4964 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 4965 void *arg) 4966 { 4967 dbuf_remap_impl_callback_arg_t *drica = arg; 4968 objset_t *os = drica->drica_os; 4969 spa_t *spa = dmu_objset_spa(os); 4970 dmu_tx_t *tx = drica->drica_tx; 4971 4972 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4973 4974 if (os == spa_meta_objset(spa)) { 4975 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 4976 } else { 4977 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 4978 size, drica->drica_blk_birth, tx); 4979 } 4980 } 4981 4982 static void 4983 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) 4984 { 4985 blkptr_t bp_copy = *bp; 4986 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4987 dbuf_remap_impl_callback_arg_t drica; 4988 4989 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 4990 4991 drica.drica_os = dn->dn_objset; 4992 drica.drica_blk_birth = bp->blk_birth; 4993 drica.drica_tx = tx; 4994 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 4995 &drica)) { 4996 /* 4997 * If the blkptr being remapped is tracked by a livelist, 4998 * then we need to make sure the livelist reflects the update. 4999 * First, cancel out the old blkptr by appending a 'FREE' 5000 * entry. Next, add an 'ALLOC' to track the new version. This 5001 * way we avoid trying to free an inaccurate blkptr at delete. 5002 * Note that embedded blkptrs are not tracked in livelists. 
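 *
 * Schematically (descriptive only), for a BP "old" remapped to "new":
 *
 *	bplist_append(dd_pending_frees,  old);	cancels the old entry
 *	bplist_append(dd_pending_allocs, new);	tracks the new version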
5003 */ 5004 if (dn->dn_objset != spa_meta_objset(spa)) { 5005 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset); 5006 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && 5007 bp->blk_birth > ds->ds_dir->dd_origin_txg) { 5008 ASSERT(!BP_IS_EMBEDDED(bp)); 5009 ASSERT(dsl_dir_is_clone(ds->ds_dir)); 5010 ASSERT(spa_feature_is_enabled(spa, 5011 SPA_FEATURE_LIVELIST)); 5012 bplist_append(&ds->ds_dir->dd_pending_frees, 5013 bp); 5014 bplist_append(&ds->ds_dir->dd_pending_allocs, 5015 &bp_copy); 5016 } 5017 } 5018 5019 /* 5020 * The db_rwlock prevents dbuf_read_impl() from 5021 * dereferencing the BP while we are changing it. To 5022 * avoid lock contention, only grab it when we are actually 5023 * changing the BP. 5024 */ 5025 if (rw != NULL) 5026 rw_enter(rw, RW_WRITER); 5027 *bp = bp_copy; 5028 if (rw != NULL) 5029 rw_exit(rw); 5030 } 5031 } 5032 5033 /* 5034 * Remap any existing BP's to concrete vdevs, if possible. 5035 */ 5036 static void 5037 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 5038 { 5039 spa_t *spa = dmu_objset_spa(db->db_objset); 5040 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 5041 5042 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 5043 return; 5044 5045 if (db->db_level > 0) { 5046 blkptr_t *bp = db->db.db_data; 5047 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 5048 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx); 5049 } 5050 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 5051 dnode_phys_t *dnp = db->db.db_data; 5052 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 5053 DMU_OT_DNODE); 5054 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; 5055 i += dnp[i].dn_extra_slots + 1) { 5056 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 5057 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL : 5058 &dn->dn_dbuf->db_rwlock); 5059 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock, 5060 tx); 5061 } 5062 } 5063 } 5064 } 5065 5066 5067 /* 5068 * Populate dr->dr_zio with a zio to commit a dirty buffer to disk. 5069 * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio). 5070 */ 5071 static void 5072 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 5073 { 5074 dmu_buf_impl_t *db = dr->dr_dbuf; 5075 dnode_t *dn = dr->dr_dnode; 5076 objset_t *os; 5077 dmu_buf_impl_t *parent = db->db_parent; 5078 uint64_t txg = tx->tx_txg; 5079 zbookmark_phys_t zb; 5080 zio_prop_t zp; 5081 zio_t *pio; /* parent I/O */ 5082 int wp_flag = 0; 5083 5084 ASSERT(dmu_tx_is_syncing(tx)); 5085 5086 os = dn->dn_objset; 5087 5088 if (db->db_state != DB_NOFILL) { 5089 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 5090 /* 5091 * Private object buffers are released here rather 5092 * than in dbuf_dirty() since they are only modified 5093 * in the syncing context and we don't want the 5094 * overhead of making multiple copies of the data. 5095 */ 5096 if (BP_IS_HOLE(db->db_blkptr)) { 5097 arc_buf_thaw(data); 5098 } else { 5099 dbuf_release_bp(db); 5100 } 5101 dbuf_remap(dn, db, tx); 5102 } 5103 } 5104 5105 if (parent != dn->dn_dbuf) { 5106 /* Our parent is an indirect block. */ 5107 /* We have a dirty parent that has been scheduled for write. */ 5108 ASSERT(parent && parent->db_data_pending); 5109 /* Our parent's buffer is one level closer to the dnode. */ 5110 ASSERT(db->db_level == parent->db_level-1); 5111 /* 5112 * We're about to modify our parent's db_data by modifying 5113 * our block pointer, so the parent must be released. 
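 *
 * ("Released" refers to arc_release(): the parent's ARC buffer has
 * been made anonymous and writable, so storing this dbuf's new BP
 * into the parent's data below is safe.)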
5114 */ 5115 ASSERT(arc_released(parent->db_buf)); 5116 pio = parent->db_data_pending->dr_zio; 5117 } else { 5118 /* Our parent is the dnode itself. */ 5119 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 5120 db->db_blkid != DMU_SPILL_BLKID) || 5121 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 5122 if (db->db_blkid != DMU_SPILL_BLKID) 5123 ASSERT3P(db->db_blkptr, ==, 5124 &dn->dn_phys->dn_blkptr[db->db_blkid]); 5125 pio = dn->dn_zio; 5126 } 5127 5128 ASSERT(db->db_level == 0 || data == db->db_buf); 5129 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 5130 ASSERT(pio); 5131 5132 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 5133 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 5134 db->db.db_object, db->db_level, db->db_blkid); 5135 5136 if (db->db_blkid == DMU_SPILL_BLKID) 5137 wp_flag = WP_SPILL; 5138 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 5139 5140 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 5141 5142 /* 5143 * We copy the blkptr now (rather than when we instantiate the dirty 5144 * record), because its value can change between open context and 5145 * syncing context. We do not need to hold dn_struct_rwlock to read 5146 * db_blkptr because we are in syncing context. 5147 */ 5148 dr->dr_bp_copy = *db->db_blkptr; 5149 5150 if (db->db_level == 0 && 5151 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 5152 /* 5153 * The BP for this block has been provided by open context 5154 * (by dmu_sync() or dmu_buf_write_embedded()). 5155 */ 5156 abd_t *contents = (data != NULL) ? 5157 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 5158 5159 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy, 5160 contents, db->db.db_size, db->db.db_size, &zp, 5161 dbuf_write_override_ready, NULL, 5162 dbuf_write_override_done, 5163 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 5164 mutex_enter(&db->db_mtx); 5165 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 5166 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 5167 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite, 5168 dr->dt.dl.dr_brtwrite); 5169 mutex_exit(&db->db_mtx); 5170 } else if (db->db_state == DB_NOFILL) { 5171 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 5172 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 5173 dr->dr_zio = zio_write(pio, os->os_spa, txg, 5174 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 5175 dbuf_write_nofill_ready, NULL, 5176 dbuf_write_nofill_done, db, 5177 ZIO_PRIORITY_ASYNC_WRITE, 5178 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 5179 } else { 5180 ASSERT(arc_released(data)); 5181 5182 /* 5183 * For indirect blocks, we want to setup the children 5184 * ready callback so that we can properly handle an indirect 5185 * block that only contains holes. 
5186 */ 5187 arc_write_done_func_t *children_ready_cb = NULL; 5188 if (db->db_level != 0) 5189 children_ready_cb = dbuf_write_children_ready; 5190 5191 dr->dr_zio = arc_write(pio, os->os_spa, txg, 5192 &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db), 5193 dbuf_is_l2cacheable(db), &zp, dbuf_write_ready, 5194 children_ready_cb, dbuf_write_done, db, 5195 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 5196 } 5197 } 5198 5199 EXPORT_SYMBOL(dbuf_find); 5200 EXPORT_SYMBOL(dbuf_is_metadata); 5201 EXPORT_SYMBOL(dbuf_destroy); 5202 EXPORT_SYMBOL(dbuf_loan_arcbuf); 5203 EXPORT_SYMBOL(dbuf_whichblock); 5204 EXPORT_SYMBOL(dbuf_read); 5205 EXPORT_SYMBOL(dbuf_unoverride); 5206 EXPORT_SYMBOL(dbuf_free_range); 5207 EXPORT_SYMBOL(dbuf_new_size); 5208 EXPORT_SYMBOL(dbuf_release_bp); 5209 EXPORT_SYMBOL(dbuf_dirty); 5210 EXPORT_SYMBOL(dmu_buf_set_crypt_params); 5211 EXPORT_SYMBOL(dmu_buf_will_dirty); 5212 EXPORT_SYMBOL(dmu_buf_is_dirty); 5213 EXPORT_SYMBOL(dmu_buf_will_clone); 5214 EXPORT_SYMBOL(dmu_buf_will_not_fill); 5215 EXPORT_SYMBOL(dmu_buf_will_fill); 5216 EXPORT_SYMBOL(dmu_buf_fill_done); 5217 EXPORT_SYMBOL(dmu_buf_rele); 5218 EXPORT_SYMBOL(dbuf_assign_arcbuf); 5219 EXPORT_SYMBOL(dbuf_prefetch); 5220 EXPORT_SYMBOL(dbuf_hold_impl); 5221 EXPORT_SYMBOL(dbuf_hold); 5222 EXPORT_SYMBOL(dbuf_hold_level); 5223 EXPORT_SYMBOL(dbuf_create_bonus); 5224 EXPORT_SYMBOL(dbuf_spill_set_blksz); 5225 EXPORT_SYMBOL(dbuf_rm_spill); 5226 EXPORT_SYMBOL(dbuf_add_ref); 5227 EXPORT_SYMBOL(dbuf_rele); 5228 EXPORT_SYMBOL(dbuf_rele_and_unlock); 5229 EXPORT_SYMBOL(dbuf_refcount); 5230 EXPORT_SYMBOL(dbuf_sync_list); 5231 EXPORT_SYMBOL(dmu_buf_set_user); 5232 EXPORT_SYMBOL(dmu_buf_set_user_ie); 5233 EXPORT_SYMBOL(dmu_buf_get_user); 5234 EXPORT_SYMBOL(dmu_buf_get_blkptr); 5235 5236 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW, 5237 "Maximum size in bytes of the dbuf cache."); 5238 5239 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW, 5240 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction."); 5241 5242 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW, 5243 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops."); 5244 5245 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW, 5246 "Maximum size in bytes of dbuf metadata cache."); 5247 5248 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW, 5249 "Set size of dbuf cache to log2 fraction of arc size."); 5250 5251 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW, 5252 "Set size of dbuf metadata cache to log2 fraction of arc size."); 5253 5254 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD, 5255 "Set size of dbuf cache mutex array as log2 shift."); 5256
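/*
 * Illustrative tuning sketch for the module parameters above. The
 * shell commands and values are hypothetical examples, not defaults
 * taken from this file; on Linux the parameters are exposed under
 * /sys/module/zfs/parameters/.
 *
 *	# cat /sys/module/zfs/parameters/dbuf_cache_shift
 *	5
 *	# echo 6 > /sys/module/zfs/parameters/dbuf_cache_shift
 *
 * Since dbuf_cache_shift sets the dbuf cache target to a log2 fraction
 * of the ARC size, raising the shift from 5 to 6 in this sketch halves
 * the target (from 1/32 to 1/64 of the ARC).
 */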