1 // SPDX-License-Identifier: CDDL-1.0 2 /* 3 * CDDL HEADER START 4 * 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or https://opensource.org/licenses/CDDL-1.0. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 * Copyright (c) 2012, 2020 by Delphix. All rights reserved. 26 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 28 * Copyright (c) 2019, Klara Inc. 29 * Copyright (c) 2019, Allan Jude 30 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek 31 */ 32 33 #include <sys/zfs_context.h> 34 #include <sys/arc.h> 35 #include <sys/dmu.h> 36 #include <sys/dmu_send.h> 37 #include <sys/dmu_impl.h> 38 #include <sys/dbuf.h> 39 #include <sys/dmu_objset.h> 40 #include <sys/dsl_dataset.h> 41 #include <sys/dsl_dir.h> 42 #include <sys/dmu_tx.h> 43 #include <sys/spa.h> 44 #include <sys/zio.h> 45 #include <sys/dmu_zfetch.h> 46 #include <sys/sa.h> 47 #include <sys/sa_impl.h> 48 #include <sys/zfeature.h> 49 #include <sys/blkptr.h> 50 #include <sys/range_tree.h> 51 #include <sys/trace_zfs.h> 52 #include <sys/callb.h> 53 #include <sys/abd.h> 54 #include <sys/brt.h> 55 #include <sys/vdev.h> 56 #include <cityhash.h> 57 #include <sys/spa_impl.h> 58 #include <sys/wmsum.h> 59 #include <sys/vdev_impl.h> 60 61 static kstat_t *dbuf_ksp; 62 63 typedef struct dbuf_stats { 64 /* 65 * Various statistics about the size of the dbuf cache. 66 */ 67 kstat_named_t cache_count; 68 kstat_named_t cache_size_bytes; 69 kstat_named_t cache_size_bytes_max; 70 /* 71 * Statistics regarding the bounds on the dbuf cache size. 72 */ 73 kstat_named_t cache_target_bytes; 74 kstat_named_t cache_lowater_bytes; 75 kstat_named_t cache_hiwater_bytes; 76 /* 77 * Total number of dbuf cache evictions that have occurred. 78 */ 79 kstat_named_t cache_total_evicts; 80 /* 81 * The distribution of dbuf levels in the dbuf cache and 82 * the total size of all dbufs at each level. 83 */ 84 kstat_named_t cache_levels[DN_MAX_LEVELS]; 85 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS]; 86 /* 87 * Statistics about the dbuf hash table. 88 */ 89 kstat_named_t hash_hits; 90 kstat_named_t hash_misses; 91 kstat_named_t hash_collisions; 92 kstat_named_t hash_elements; 93 /* 94 * Number of sublists containing more than one dbuf in the dbuf 95 * hash table. Keep track of the longest hash chain. 96 */ 97 kstat_named_t hash_chains; 98 kstat_named_t hash_chain_max; 99 /* 100 * Number of times a dbuf_create() discovers that a dbuf was 101 * already created and in the dbuf hash table. 102 */ 103 kstat_named_t hash_insert_race; 104 /* 105 * Number of entries in the hash table dbuf and mutex arrays. 
106 */ 107 kstat_named_t hash_table_count; 108 kstat_named_t hash_mutex_count; 109 /* 110 * Statistics about the size of the metadata dbuf cache. 111 */ 112 kstat_named_t metadata_cache_count; 113 kstat_named_t metadata_cache_size_bytes; 114 kstat_named_t metadata_cache_size_bytes_max; 115 /* 116 * For diagnostic purposes, this is incremented whenever we can't add 117 * something to the metadata cache because it's full, and instead put 118 * the data in the regular dbuf cache. 119 */ 120 kstat_named_t metadata_cache_overflow; 121 } dbuf_stats_t; 122 123 dbuf_stats_t dbuf_stats = { 124 { "cache_count", KSTAT_DATA_UINT64 }, 125 { "cache_size_bytes", KSTAT_DATA_UINT64 }, 126 { "cache_size_bytes_max", KSTAT_DATA_UINT64 }, 127 { "cache_target_bytes", KSTAT_DATA_UINT64 }, 128 { "cache_lowater_bytes", KSTAT_DATA_UINT64 }, 129 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 }, 130 { "cache_total_evicts", KSTAT_DATA_UINT64 }, 131 { { "cache_levels_N", KSTAT_DATA_UINT64 } }, 132 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } }, 133 { "hash_hits", KSTAT_DATA_UINT64 }, 134 { "hash_misses", KSTAT_DATA_UINT64 }, 135 { "hash_collisions", KSTAT_DATA_UINT64 }, 136 { "hash_elements", KSTAT_DATA_UINT64 }, 137 { "hash_chains", KSTAT_DATA_UINT64 }, 138 { "hash_chain_max", KSTAT_DATA_UINT64 }, 139 { "hash_insert_race", KSTAT_DATA_UINT64 }, 140 { "hash_table_count", KSTAT_DATA_UINT64 }, 141 { "hash_mutex_count", KSTAT_DATA_UINT64 }, 142 { "metadata_cache_count", KSTAT_DATA_UINT64 }, 143 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 }, 144 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 }, 145 { "metadata_cache_overflow", KSTAT_DATA_UINT64 } 146 }; 147 148 struct { 149 wmsum_t cache_count; 150 wmsum_t cache_total_evicts; 151 wmsum_t cache_levels[DN_MAX_LEVELS]; 152 wmsum_t cache_levels_bytes[DN_MAX_LEVELS]; 153 wmsum_t hash_hits; 154 wmsum_t hash_misses; 155 wmsum_t hash_collisions; 156 wmsum_t hash_elements; 157 wmsum_t hash_chains; 158 wmsum_t hash_insert_race; 159 wmsum_t metadata_cache_count; 160 wmsum_t metadata_cache_overflow; 161 } dbuf_sums; 162 163 #define DBUF_STAT_INCR(stat, val) \ 164 wmsum_add(&dbuf_sums.stat, val) 165 #define DBUF_STAT_DECR(stat, val) \ 166 DBUF_STAT_INCR(stat, -(val)) 167 #define DBUF_STAT_BUMP(stat) \ 168 DBUF_STAT_INCR(stat, 1) 169 #define DBUF_STAT_BUMPDOWN(stat) \ 170 DBUF_STAT_INCR(stat, -1) 171 #define DBUF_STAT_MAX(stat, v) { \ 172 uint64_t _m; \ 173 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \ 174 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\ 175 continue; \ 176 } 177 178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr); 180 181 /* 182 * Global data structures and functions for the dbuf cache. 183 */ 184 static kmem_cache_t *dbuf_kmem_cache; 185 kmem_cache_t *dbuf_dirty_kmem_cache; 186 static taskq_t *dbu_evict_taskq; 187 188 static kthread_t *dbuf_cache_evict_thread; 189 static kmutex_t dbuf_evict_lock; 190 static kcondvar_t dbuf_evict_cv; 191 static boolean_t dbuf_evict_thread_exit; 192 193 /* 194 * There are two dbuf caches; each dbuf can only be in one of them at a time. 195 * 196 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands 197 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs 198 * that represent the metadata that describes filesystems/snapshots/ 199 * bookmarks/properties/etc. 
We only evict from this cache when we export a 200 * pool, to short-circuit as much I/O as possible for all administrative 201 * commands that need the metadata. There is no eviction policy for this 202 * cache, because we try to only include types in it which would occupy a 203 * very small amount of space per object but create a large impact on the 204 * performance of these commands. Instead, after it reaches a maximum size 205 * (which should only happen on very small memory systems with a very large 206 * number of filesystem objects), we stop taking new dbufs into the 207 * metadata cache, instead putting them in the normal dbuf cache. 208 * 209 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that 210 * are not currently held but have been recently released. These dbufs 211 * are not eligible for arc eviction until they are aged out of the cache. 212 * Dbufs that are aged out of the cache will be immediately destroyed and 213 * become eligible for arc eviction. 214 * 215 * Dbufs are added to these caches once the last hold is released. If a dbuf is 216 * later accessed and still exists in the dbuf cache, then it will be removed 217 * from the cache and later re-added to the head of the cache. 218 * 219 * If a given dbuf meets the requirements for the metadata cache, it will go 220 * there, otherwise it will be considered for the generic LRU dbuf cache. The 221 * caches and the refcounts tracking their sizes are stored in an array indexed 222 * by those caches' matching enum values (from dbuf_cached_state_t). 223 */ 224 typedef struct dbuf_cache { 225 multilist_t cache; 226 zfs_refcount_t size ____cacheline_aligned; 227 } dbuf_cache_t; 228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX]; 229 230 /* Size limits for the caches */ 231 static uint64_t dbuf_cache_max_bytes = UINT64_MAX; 232 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX; 233 234 /* Set the default sizes of the caches to log2 fraction of arc size */ 235 static uint_t dbuf_cache_shift = 5; 236 static uint_t dbuf_metadata_cache_shift = 6; 237 238 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */ 239 static uint_t dbuf_mutex_cache_shift = 0; 240 241 static unsigned long dbuf_cache_target_bytes(void); 242 static unsigned long dbuf_metadata_cache_target_bytes(void); 243 244 /* 245 * The LRU dbuf cache uses a three-stage eviction policy: 246 * - A low water marker designates when the dbuf eviction thread 247 * should stop evicting from the dbuf cache. 248 * - When we reach the maximum size (aka mid water mark), we 249 * signal the eviction thread to run. 250 * - The high water mark indicates when the eviction thread 251 * is unable to keep up with the incoming load and eviction must 252 * happen in the context of the calling thread. 253 * 254 * The dbuf cache: 255 * (max size) 256 * low water mid water hi water 257 * +----------------------------------------+----------+----------+ 258 * | | | | 259 * | | | | 260 * | | | | 261 * | | | | 262 * +----------------------------------------+----------+----------+ 263 * stop signal evict 264 * evicting eviction directly 265 * thread 266 * 267 * The high and low water marks indicate the operating range for the eviction 268 * thread. The low water mark is, by default, 90% of the total size of the 269 * cache and the high water mark is at 110% (both of these percentages can be 270 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct, 271 * respectively). 
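 * For example, with a 100 MiB target size and the default 10% margins, the
 * eviction thread stops once the cache is back under 90 MiB, and callers begin
 * evicting directly once the cache exceeds 110 MiB.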
The eviction thread will try to ensure that the cache remains 272 * within this range by waking up every second and checking if the cache is 273 * above the low water mark. The thread can also be woken up by callers adding 274 * elements into the cache if the cache is larger than the mid water (i.e max 275 * cache size). Once the eviction thread is woken up and eviction is required, 276 * it will continue evicting buffers until it's able to reduce the cache size 277 * to the low water mark. If the cache size continues to grow and hits the high 278 * water mark, then callers adding elements to the cache will begin to evict 279 * directly from the cache until the cache is no longer above the high water 280 * mark. 281 */ 282 283 /* 284 * The percentage above and below the maximum cache size. 285 */ 286 static uint_t dbuf_cache_hiwater_pct = 10; 287 static uint_t dbuf_cache_lowater_pct = 10; 288 289 static int 290 dbuf_cons(void *vdb, void *unused, int kmflag) 291 { 292 (void) unused, (void) kmflag; 293 dmu_buf_impl_t *db = vdb; 294 memset(db, 0, sizeof (dmu_buf_impl_t)); 295 296 mutex_init(&db->db_mtx, NULL, MUTEX_NOLOCKDEP, NULL); 297 rw_init(&db->db_rwlock, NULL, RW_NOLOCKDEP, NULL); 298 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); 299 multilist_link_init(&db->db_cache_link); 300 zfs_refcount_create(&db->db_holds); 301 302 return (0); 303 } 304 305 static void 306 dbuf_dest(void *vdb, void *unused) 307 { 308 (void) unused; 309 dmu_buf_impl_t *db = vdb; 310 mutex_destroy(&db->db_mtx); 311 rw_destroy(&db->db_rwlock); 312 cv_destroy(&db->db_changed); 313 ASSERT(!multilist_link_active(&db->db_cache_link)); 314 zfs_refcount_destroy(&db->db_holds); 315 } 316 317 /* 318 * dbuf hash table routines 319 */ 320 static dbuf_hash_table_t dbuf_hash_table; 321 322 /* 323 * We use Cityhash for this. It's fast, and has good hash properties without 324 * requiring any large static buffers. 
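 * The hash is computed over the objset pointer, object number, level and
 * blkid, the same four fields that DBUF_EQUAL() compares, so a given dbuf
 * always maps to the same bucket for its entire lifetime.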
325 */ 326 static uint64_t 327 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) 328 { 329 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid)); 330 } 331 332 #define DTRACE_SET_STATE(db, why) \ 333 DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \ 334 const char *, why) 335 336 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 337 ((dbuf)->db.db_object == (obj) && \ 338 (dbuf)->db_objset == (os) && \ 339 (dbuf)->db_level == (level) && \ 340 (dbuf)->db_blkid == (blkid)) 341 342 dmu_buf_impl_t * 343 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid, 344 uint64_t *hash_out) 345 { 346 dbuf_hash_table_t *h = &dbuf_hash_table; 347 uint64_t hv; 348 uint64_t idx; 349 dmu_buf_impl_t *db; 350 351 hv = dbuf_hash(os, obj, level, blkid); 352 idx = hv & h->hash_table_mask; 353 354 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 355 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { 356 if (DBUF_EQUAL(db, os, obj, level, blkid)) { 357 mutex_enter(&db->db_mtx); 358 if (db->db_state != DB_EVICTING) { 359 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 360 return (db); 361 } 362 mutex_exit(&db->db_mtx); 363 } 364 } 365 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 366 if (hash_out != NULL) 367 *hash_out = hv; 368 return (NULL); 369 } 370 371 static dmu_buf_impl_t * 372 dbuf_find_bonus(objset_t *os, uint64_t object) 373 { 374 dnode_t *dn; 375 dmu_buf_impl_t *db = NULL; 376 377 if (dnode_hold(os, object, FTAG, &dn) == 0) { 378 rw_enter(&dn->dn_struct_rwlock, RW_READER); 379 if (dn->dn_bonus != NULL) { 380 db = dn->dn_bonus; 381 mutex_enter(&db->db_mtx); 382 } 383 rw_exit(&dn->dn_struct_rwlock); 384 dnode_rele(dn, FTAG); 385 } 386 return (db); 387 } 388 389 /* 390 * Insert an entry into the hash table. If there is already an element 391 * equal to elem in the hash table, then the already existing element 392 * will be returned and the new element will not be inserted. 393 * Otherwise returns NULL. 394 */ 395 static dmu_buf_impl_t * 396 dbuf_hash_insert(dmu_buf_impl_t *db) 397 { 398 dbuf_hash_table_t *h = &dbuf_hash_table; 399 objset_t *os = db->db_objset; 400 uint64_t obj = db->db.db_object; 401 int level = db->db_level; 402 uint64_t blkid, idx; 403 dmu_buf_impl_t *dbf; 404 uint32_t i; 405 406 blkid = db->db_blkid; 407 ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash); 408 idx = db->db_hash & h->hash_table_mask; 409 410 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 411 for (dbf = h->hash_table[idx], i = 0; dbf != NULL; 412 dbf = dbf->db_hash_next, i++) { 413 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { 414 mutex_enter(&dbf->db_mtx); 415 if (dbf->db_state != DB_EVICTING) { 416 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 417 return (dbf); 418 } 419 mutex_exit(&dbf->db_mtx); 420 } 421 } 422 423 if (i > 0) { 424 DBUF_STAT_BUMP(hash_collisions); 425 if (i == 1) 426 DBUF_STAT_BUMP(hash_chains); 427 428 DBUF_STAT_MAX(hash_chain_max, i); 429 } 430 431 mutex_enter(&db->db_mtx); 432 db->db_hash_next = h->hash_table[idx]; 433 h->hash_table[idx] = db; 434 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 435 DBUF_STAT_BUMP(hash_elements); 436 437 return (NULL); 438 } 439 440 /* 441 * This returns whether this dbuf should be stored in the metadata cache, which 442 * is based on whether it's from one of the dnode types that store data related 443 * to traversing dataset hierarchies. 
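 * Eligibility is determined by the DMU_OT_IS_METADATA_CACHED() flag on the
 * dnode's object type, checked below.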
444 */ 445 static boolean_t 446 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db) 447 { 448 DB_DNODE_ENTER(db); 449 dmu_object_type_t type = DB_DNODE(db)->dn_type; 450 DB_DNODE_EXIT(db); 451 452 /* Check if this dbuf is one of the types we care about */ 453 if (DMU_OT_IS_METADATA_CACHED(type)) { 454 /* If we hit this, then we set something up wrong in dmu_ot */ 455 ASSERT(DMU_OT_IS_METADATA(type)); 456 457 /* 458 * Sanity check for small-memory systems: don't allocate too 459 * much memory for this purpose. 460 */ 461 if (zfs_refcount_count( 462 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) > 463 dbuf_metadata_cache_target_bytes()) { 464 DBUF_STAT_BUMP(metadata_cache_overflow); 465 return (B_FALSE); 466 } 467 468 return (B_TRUE); 469 } 470 471 return (B_FALSE); 472 } 473 474 /* 475 * Remove an entry from the hash table. It must be in the EVICTING state. 476 */ 477 static void 478 dbuf_hash_remove(dmu_buf_impl_t *db) 479 { 480 dbuf_hash_table_t *h = &dbuf_hash_table; 481 uint64_t idx; 482 dmu_buf_impl_t *dbf, **dbp; 483 484 ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level, 485 db->db_blkid), ==, db->db_hash); 486 idx = db->db_hash & h->hash_table_mask; 487 488 /* 489 * We mustn't hold db_mtx to maintain lock ordering: 490 * DBUF_HASH_MUTEX > db_mtx. 491 */ 492 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 493 ASSERT(db->db_state == DB_EVICTING); 494 ASSERT(!MUTEX_HELD(&db->db_mtx)); 495 496 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 497 dbp = &h->hash_table[idx]; 498 while ((dbf = *dbp) != db) { 499 dbp = &dbf->db_hash_next; 500 ASSERT(dbf != NULL); 501 } 502 *dbp = db->db_hash_next; 503 db->db_hash_next = NULL; 504 if (h->hash_table[idx] && 505 h->hash_table[idx]->db_hash_next == NULL) 506 DBUF_STAT_BUMPDOWN(hash_chains); 507 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 508 DBUF_STAT_BUMPDOWN(hash_elements); 509 } 510 511 typedef enum { 512 DBVU_EVICTING, 513 DBVU_NOT_EVICTING 514 } dbvu_verify_type_t; 515 516 static void 517 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type) 518 { 519 #ifdef ZFS_DEBUG 520 int64_t holds; 521 522 if (db->db_user == NULL) 523 return; 524 525 /* Only data blocks support the attachment of user data. */ 526 ASSERT(db->db_level == 0); 527 528 /* Clients must resolve a dbuf before attaching user data. */ 529 ASSERT(db->db.db_data != NULL); 530 ASSERT3U(db->db_state, ==, DB_CACHED); 531 532 holds = zfs_refcount_count(&db->db_holds); 533 if (verify_type == DBVU_EVICTING) { 534 /* 535 * Immediate eviction occurs when holds == dirtycnt. 536 * For normal eviction buffers, holds is zero on 537 * eviction, except when dbuf_fix_old_data() calls 538 * dbuf_clear_data(). However, the hold count can grow 539 * during eviction even though db_mtx is held (see 540 * dmu_bonus_hold() for an example), so we can only 541 * test the generic invariant that holds >= dirtycnt. 
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	if (db->db_caching_status != DB_NO_CACHE) {
		/*
		 * This is a cached dbuf, so the size of the user data is
		 * included in its cached amount. We adjust it here because the
		 * user data has already been detached from the dbuf, and the
		 * sync functions are not supposed to touch it (the dbuf might
		 * not exist anymore by the time the sync functions run).
		 */
		uint64_t size = dbu->dbu_size;
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[db->db_caching_status].size, size, dbu);
		if (db->db_caching_status == DB_DBUF_CACHE)
			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
	}

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be meta data.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db, blkptr_t *bp)
{
	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (db->db_objset->os_secondary_cache ==
	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		/*
		 * bp must be checked in the event it was passed from
		 * dbuf_read_impl() as the result of the BP being set from
		 * a Direct I/O write in dbuf_read(). See comments in
		 * dbuf_read().
		 */
		blkptr_t *db_bp = bp == NULL ?
		    db->db_blkptr : bp;

		if (db_bp == NULL || BP_IS_HOLE(db_bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(db_bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
	    (level > 0 ||
	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
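 * For example, with a 4 GiB ARC target and the default
 * dbuf_metadata_cache_shift of 6, the target works out to 4 GiB / 2^6 = 64 MiB.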
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock_idx(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		uint64_t size = db->db.db_size;
		uint64_t usize = dmu_buf_user_size(&db->db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, size, db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, usize, db->db_user);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size + usize);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
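		 * dbuf_cache_above_lowater() is re-evaluated before every
		 * dbuf_evict_one() call, so this loop stops as soon as the
		 * cache drops back to the low water mark.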
841 */ 842 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 843 dbuf_evict_one(); 844 } 845 846 mutex_enter(&dbuf_evict_lock); 847 } 848 849 dbuf_evict_thread_exit = B_FALSE; 850 cv_broadcast(&dbuf_evict_cv); 851 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */ 852 thread_exit(); 853 } 854 855 /* 856 * Wake up the dbuf eviction thread if the dbuf cache is at its max size. 857 * If the dbuf cache is at its high water mark, then evict a dbuf from the 858 * dbuf cache using the caller's context. 859 */ 860 static void 861 dbuf_evict_notify(uint64_t size) 862 { 863 /* 864 * We check if we should evict without holding the dbuf_evict_lock, 865 * because it's OK to occasionally make the wrong decision here, 866 * and grabbing the lock results in massive lock contention. 867 */ 868 if (size > dbuf_cache_target_bytes()) { 869 if (size > dbuf_cache_hiwater_bytes()) 870 dbuf_evict_one(); 871 cv_signal(&dbuf_evict_cv); 872 } 873 } 874 875 /* 876 * Since dbuf cache size is a fraction of target ARC size, ARC calls this when 877 * its target size is reduced due to memory pressure. 878 */ 879 void 880 dbuf_cache_reduce_target_size(void) 881 { 882 uint64_t size = zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size); 883 884 if (size > dbuf_cache_target_bytes()) 885 cv_signal(&dbuf_evict_cv); 886 } 887 888 static int 889 dbuf_kstat_update(kstat_t *ksp, int rw) 890 { 891 dbuf_stats_t *ds = ksp->ks_data; 892 dbuf_hash_table_t *h = &dbuf_hash_table; 893 894 if (rw == KSTAT_WRITE) 895 return (SET_ERROR(EACCES)); 896 897 ds->cache_count.value.ui64 = 898 wmsum_value(&dbuf_sums.cache_count); 899 ds->cache_size_bytes.value.ui64 = 900 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size); 901 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes(); 902 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes(); 903 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes(); 904 ds->cache_total_evicts.value.ui64 = 905 wmsum_value(&dbuf_sums.cache_total_evicts); 906 for (int i = 0; i < DN_MAX_LEVELS; i++) { 907 ds->cache_levels[i].value.ui64 = 908 wmsum_value(&dbuf_sums.cache_levels[i]); 909 ds->cache_levels_bytes[i].value.ui64 = 910 wmsum_value(&dbuf_sums.cache_levels_bytes[i]); 911 } 912 ds->hash_hits.value.ui64 = 913 wmsum_value(&dbuf_sums.hash_hits); 914 ds->hash_misses.value.ui64 = 915 wmsum_value(&dbuf_sums.hash_misses); 916 ds->hash_collisions.value.ui64 = 917 wmsum_value(&dbuf_sums.hash_collisions); 918 ds->hash_elements.value.ui64 = 919 wmsum_value(&dbuf_sums.hash_elements); 920 ds->hash_chains.value.ui64 = 921 wmsum_value(&dbuf_sums.hash_chains); 922 ds->hash_insert_race.value.ui64 = 923 wmsum_value(&dbuf_sums.hash_insert_race); 924 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1; 925 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1; 926 ds->metadata_cache_count.value.ui64 = 927 wmsum_value(&dbuf_sums.metadata_cache_count); 928 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count( 929 &dbuf_caches[DB_DBUF_METADATA_CACHE].size); 930 ds->metadata_cache_overflow.value.ui64 = 931 wmsum_value(&dbuf_sums.metadata_cache_overflow); 932 return (0); 933 } 934 935 void 936 dbuf_init(void) 937 { 938 uint64_t hmsize, hsize = 1ULL << 16; 939 dbuf_hash_table_t *h = &dbuf_hash_table; 940 941 /* 942 * The hash table is big enough to fill one eighth of physical memory 943 * with an average block size of zfs_arc_average_blocksize (default 8K). 944 * By default, the table will take up 945 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 
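	 * For example, with 16 GiB of physical memory the loop below keeps
	 * doubling hsize until hsize * 8 KiB reaches 2 GiB (one eighth of
	 * memory), settling on 2^18 buckets.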
	 */
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets. A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
	dbuf_dirty_kmem_cache = kmem_cache_create("dbuf_dirty_record_t",
	    sizeof (dbuf_dirty_record_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_NOLOCKDEP, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_elements, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h
= &dbuf_hash_table; 1051 1052 dbuf_stats_destroy(); 1053 1054 for (int i = 0; i < (h->hash_mutex_mask + 1); i++) 1055 mutex_destroy(&h->hash_mutexes[i]); 1056 1057 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 1058 vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) * 1059 sizeof (kmutex_t)); 1060 1061 kmem_cache_destroy(dbuf_kmem_cache); 1062 kmem_cache_destroy(dbuf_dirty_kmem_cache); 1063 taskq_destroy(dbu_evict_taskq); 1064 1065 mutex_enter(&dbuf_evict_lock); 1066 dbuf_evict_thread_exit = B_TRUE; 1067 while (dbuf_evict_thread_exit) { 1068 cv_signal(&dbuf_evict_cv); 1069 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 1070 } 1071 mutex_exit(&dbuf_evict_lock); 1072 1073 mutex_destroy(&dbuf_evict_lock); 1074 cv_destroy(&dbuf_evict_cv); 1075 1076 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) { 1077 zfs_refcount_destroy(&dbuf_caches[dcs].size); 1078 multilist_destroy(&dbuf_caches[dcs].cache); 1079 } 1080 1081 if (dbuf_ksp != NULL) { 1082 kstat_delete(dbuf_ksp); 1083 dbuf_ksp = NULL; 1084 } 1085 1086 wmsum_fini(&dbuf_sums.cache_count); 1087 wmsum_fini(&dbuf_sums.cache_total_evicts); 1088 for (int i = 0; i < DN_MAX_LEVELS; i++) { 1089 wmsum_fini(&dbuf_sums.cache_levels[i]); 1090 wmsum_fini(&dbuf_sums.cache_levels_bytes[i]); 1091 } 1092 wmsum_fini(&dbuf_sums.hash_hits); 1093 wmsum_fini(&dbuf_sums.hash_misses); 1094 wmsum_fini(&dbuf_sums.hash_collisions); 1095 wmsum_fini(&dbuf_sums.hash_elements); 1096 wmsum_fini(&dbuf_sums.hash_chains); 1097 wmsum_fini(&dbuf_sums.hash_insert_race); 1098 wmsum_fini(&dbuf_sums.metadata_cache_count); 1099 wmsum_fini(&dbuf_sums.metadata_cache_overflow); 1100 } 1101 1102 /* 1103 * Other stuff. 1104 */ 1105 1106 #ifdef ZFS_DEBUG 1107 static void 1108 dbuf_verify(dmu_buf_impl_t *db) 1109 { 1110 dnode_t *dn; 1111 dbuf_dirty_record_t *dr; 1112 uint32_t txg_prev; 1113 1114 ASSERT(MUTEX_HELD(&db->db_mtx)); 1115 1116 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 1117 return; 1118 1119 ASSERT(db->db_objset != NULL); 1120 DB_DNODE_ENTER(db); 1121 dn = DB_DNODE(db); 1122 if (dn == NULL) { 1123 ASSERT(db->db_parent == NULL); 1124 ASSERT(db->db_blkptr == NULL); 1125 } else { 1126 ASSERT3U(db->db.db_object, ==, dn->dn_object); 1127 ASSERT3P(db->db_objset, ==, dn->dn_objset); 1128 ASSERT3U(db->db_level, <, dn->dn_nlevels); 1129 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 1130 db->db_blkid == DMU_SPILL_BLKID || 1131 !avl_is_empty(&dn->dn_dbufs)); 1132 } 1133 if (db->db_blkid == DMU_BONUS_BLKID) { 1134 ASSERT(dn != NULL); 1135 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 1136 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 1137 } else if (db->db_blkid == DMU_SPILL_BLKID) { 1138 ASSERT(dn != NULL); 1139 ASSERT0(db->db.db_offset); 1140 } else { 1141 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 1142 } 1143 1144 if ((dr = list_head(&db->db_dirty_records)) != NULL) { 1145 ASSERT(dr->dr_dbuf == db); 1146 txg_prev = dr->dr_txg; 1147 for (dr = list_next(&db->db_dirty_records, dr); dr != NULL; 1148 dr = list_next(&db->db_dirty_records, dr)) { 1149 ASSERT(dr->dr_dbuf == db); 1150 ASSERT(txg_prev > dr->dr_txg); 1151 txg_prev = dr->dr_txg; 1152 } 1153 } 1154 1155 /* 1156 * We can't assert that db_size matches dn_datablksz because it 1157 * can be momentarily different when another thread is doing 1158 * dnode_set_blksz(). 
1159 */ 1160 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 1161 dr = db->db_data_pending; 1162 /* 1163 * It should only be modified in syncing context, so 1164 * make sure we only have one copy of the data. 1165 */ 1166 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 1167 } 1168 1169 /* verify db->db_blkptr */ 1170 if (db->db_blkptr) { 1171 if (db->db_parent == dn->dn_dbuf) { 1172 /* db is pointed to by the dnode */ 1173 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 1174 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 1175 ASSERT(db->db_parent == NULL); 1176 else 1177 ASSERT(db->db_parent != NULL); 1178 if (db->db_blkid != DMU_SPILL_BLKID) 1179 ASSERT3P(db->db_blkptr, ==, 1180 &dn->dn_phys->dn_blkptr[db->db_blkid]); 1181 } else { 1182 /* db is pointed to by an indirect block */ 1183 int epb __maybe_unused = db->db_parent->db.db_size >> 1184 SPA_BLKPTRSHIFT; 1185 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 1186 ASSERT3U(db->db_parent->db.db_object, ==, 1187 db->db.db_object); 1188 /* 1189 * dnode_grow_indblksz() can make this fail if we don't 1190 * have the parent's rwlock. XXX indblksz no longer 1191 * grows. safe to do this now? 1192 */ 1193 if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) { 1194 ASSERT3P(db->db_blkptr, ==, 1195 ((blkptr_t *)db->db_parent->db.db_data + 1196 db->db_blkid % epb)); 1197 } 1198 } 1199 } 1200 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 1201 (db->db_buf == NULL || db->db_buf->b_data) && 1202 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 1203 db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) { 1204 /* 1205 * If the blkptr isn't set but they have nonzero data, 1206 * it had better be dirty, otherwise we'll lose that 1207 * data when we evict this buffer. 1208 * 1209 * There is an exception to this rule for indirect blocks; in 1210 * this case, if the indirect block is a hole, we fill in a few 1211 * fields on each of the child blocks (importantly, birth time) 1212 * to prevent hole birth times from being lost when you 1213 * partially fill in a hole. 1214 */ 1215 if (db->db_dirtycnt == 0) { 1216 if (db->db_level == 0) { 1217 uint64_t *buf = db->db.db_data; 1218 int i; 1219 1220 for (i = 0; i < db->db.db_size >> 3; i++) { 1221 ASSERT(buf[i] == 0); 1222 } 1223 } else { 1224 blkptr_t *bps = db->db.db_data; 1225 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 1226 db->db.db_size); 1227 /* 1228 * We want to verify that all the blkptrs in the 1229 * indirect block are holes, but we may have 1230 * automatically set up a few fields for them. 1231 * We iterate through each blkptr and verify 1232 * they only have those fields set. 
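				 * (dbuf_handle_indirect_hole() fills in only
				 * the type, level, lsize and logical birth,
				 * which is what the asserts below allow.)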
1233 */ 1234 for (int i = 0; 1235 i < db->db.db_size / sizeof (blkptr_t); 1236 i++) { 1237 blkptr_t *bp = &bps[i]; 1238 ASSERT(ZIO_CHECKSUM_IS_ZERO( 1239 &bp->blk_cksum)); 1240 ASSERT( 1241 DVA_IS_EMPTY(&bp->blk_dva[0]) && 1242 DVA_IS_EMPTY(&bp->blk_dva[1]) && 1243 DVA_IS_EMPTY(&bp->blk_dva[2])); 1244 ASSERT0(bp->blk_fill); 1245 ASSERT0(bp->blk_pad[0]); 1246 ASSERT0(bp->blk_pad[1]); 1247 ASSERT(!BP_IS_EMBEDDED(bp)); 1248 ASSERT(BP_IS_HOLE(bp)); 1249 ASSERT0(BP_GET_PHYSICAL_BIRTH(bp)); 1250 } 1251 } 1252 } 1253 } 1254 DB_DNODE_EXIT(db); 1255 } 1256 #endif 1257 1258 static void 1259 dbuf_clear_data(dmu_buf_impl_t *db) 1260 { 1261 ASSERT(MUTEX_HELD(&db->db_mtx)); 1262 dbuf_evict_user(db); 1263 ASSERT3P(db->db_buf, ==, NULL); 1264 db->db.db_data = NULL; 1265 if (db->db_state != DB_NOFILL) { 1266 db->db_state = DB_UNCACHED; 1267 DTRACE_SET_STATE(db, "clear data"); 1268 } 1269 } 1270 1271 static void 1272 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 1273 { 1274 ASSERT(MUTEX_HELD(&db->db_mtx)); 1275 ASSERT(buf != NULL); 1276 1277 db->db_buf = buf; 1278 ASSERT(buf->b_data != NULL); 1279 db->db.db_data = buf->b_data; 1280 } 1281 1282 static arc_buf_t * 1283 dbuf_alloc_arcbuf(dmu_buf_impl_t *db) 1284 { 1285 spa_t *spa = db->db_objset->os_spa; 1286 1287 return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size)); 1288 } 1289 1290 /* 1291 * Calculate which level n block references the data at the level 0 offset 1292 * provided. 1293 */ 1294 uint64_t 1295 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset) 1296 { 1297 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 1298 /* 1299 * The level n blkid is equal to the level 0 blkid divided by 1300 * the number of level 0s in a level n block. 1301 * 1302 * The level 0 blkid is offset >> datablkshift = 1303 * offset / 2^datablkshift. 1304 * 1305 * The number of level 0s in a level n is the number of block 1306 * pointers in an indirect block, raised to the power of level. 1307 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 1308 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 1309 * 1310 * Thus, the level n blkid is: offset / 1311 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT)))) 1312 * = offset / 2^(datablkshift + level * 1313 * (indblkshift - SPA_BLKPTRSHIFT)) 1314 * = offset >> (datablkshift + level * 1315 * (indblkshift - SPA_BLKPTRSHIFT)) 1316 */ 1317 1318 const unsigned exp = dn->dn_datablkshift + 1319 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT); 1320 1321 if (exp >= 8 * sizeof (offset)) { 1322 /* This only happens on the highest indirection level */ 1323 ASSERT3U(level, ==, dn->dn_nlevels - 1); 1324 return (0); 1325 } 1326 1327 ASSERT3U(exp, <, 8 * sizeof (offset)); 1328 1329 return (offset >> exp); 1330 } else { 1331 ASSERT3U(offset, <, dn->dn_datablksz); 1332 return (0); 1333 } 1334 } 1335 1336 /* 1337 * This function is used to lock the parent of the provided dbuf. This should be 1338 * used when modifying or reading db_blkptr. 1339 */ 1340 db_lock_type_t 1341 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag) 1342 { 1343 enum db_lock_type ret = DLT_NONE; 1344 if (db->db_parent != NULL) { 1345 rw_enter(&db->db_parent->db_rwlock, rw); 1346 ret = DLT_PARENT; 1347 } else if (dmu_objset_ds(db->db_objset) != NULL) { 1348 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw, 1349 tag); 1350 ret = DLT_OBJSET; 1351 } 1352 /* 1353 * We only return a DLT_NONE lock when it's the top-most indirect block 1354 * of the meta-dnode of the MOS. 
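	 * In that case there is nothing to lock, and dmu_buf_unlock_parent()
	 * treats DLT_NONE as a no-op.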
1355 */ 1356 return (ret); 1357 } 1358 1359 /* 1360 * We need to pass the lock type in because it's possible that the block will 1361 * move from being the topmost indirect block in a dnode (and thus, have no 1362 * parent) to not the top-most via an indirection increase. This would cause a 1363 * panic if we didn't pass the lock type in. 1364 */ 1365 void 1366 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag) 1367 { 1368 if (type == DLT_PARENT) 1369 rw_exit(&db->db_parent->db_rwlock); 1370 else if (type == DLT_OBJSET) 1371 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag); 1372 } 1373 1374 static void 1375 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 1376 arc_buf_t *buf, void *vdb) 1377 { 1378 (void) zb, (void) bp; 1379 dmu_buf_impl_t *db = vdb; 1380 1381 mutex_enter(&db->db_mtx); 1382 ASSERT3U(db->db_state, ==, DB_READ); 1383 1384 /* 1385 * All reads are synchronous, so we must have a hold on the dbuf 1386 */ 1387 ASSERT(zfs_refcount_count(&db->db_holds) > 0); 1388 ASSERT(db->db_buf == NULL); 1389 ASSERT(db->db.db_data == NULL); 1390 if (buf == NULL) { 1391 /* i/o error */ 1392 ASSERT(zio == NULL || zio->io_error != 0); 1393 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1394 ASSERT3P(db->db_buf, ==, NULL); 1395 db->db_state = DB_UNCACHED; 1396 DTRACE_SET_STATE(db, "i/o error"); 1397 } else if (db->db_level == 0 && db->db_freed_in_flight) { 1398 /* freed in flight */ 1399 ASSERT(zio == NULL || zio->io_error == 0); 1400 arc_release(buf, db); 1401 memset(buf->b_data, 0, db->db.db_size); 1402 arc_buf_freeze(buf); 1403 db->db_freed_in_flight = FALSE; 1404 dbuf_set_data(db, buf); 1405 db->db_state = DB_CACHED; 1406 DTRACE_SET_STATE(db, "freed in flight"); 1407 } else { 1408 /* success */ 1409 ASSERT(zio == NULL || zio->io_error == 0); 1410 dbuf_set_data(db, buf); 1411 db->db_state = DB_CACHED; 1412 DTRACE_SET_STATE(db, "successful read"); 1413 } 1414 cv_broadcast(&db->db_changed); 1415 dbuf_rele_and_unlock(db, NULL, B_FALSE); 1416 } 1417 1418 /* 1419 * Shortcut for performing reads on bonus dbufs. Returns 1420 * an error if we fail to verify the dnode associated with 1421 * a decrypted block. Otherwise success. 1422 */ 1423 static int 1424 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn) 1425 { 1426 void* db_data; 1427 int bonuslen, max_bonuslen; 1428 1429 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 1430 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1431 ASSERT(MUTEX_HELD(&db->db_mtx)); 1432 ASSERT(DB_DNODE_HELD(db)); 1433 ASSERT3U(bonuslen, <=, db->db.db_size); 1434 db_data = kmem_alloc(max_bonuslen, KM_SLEEP); 1435 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 1436 if (bonuslen < max_bonuslen) 1437 memset(db_data, 0, max_bonuslen); 1438 if (bonuslen) 1439 memcpy(db_data, DN_BONUS(dn->dn_phys), bonuslen); 1440 db->db.db_data = db_data; 1441 db->db_state = DB_CACHED; 1442 DTRACE_SET_STATE(db, "bonus buffer filled"); 1443 return (0); 1444 } 1445 1446 static void 1447 dbuf_handle_indirect_hole(void *data, dnode_t *dn, blkptr_t *dbbp) 1448 { 1449 blkptr_t *bps = data; 1450 uint32_t indbs = 1ULL << dn->dn_indblkshift; 1451 int n_bps = indbs >> SPA_BLKPTRSHIFT; 1452 1453 for (int i = 0; i < n_bps; i++) { 1454 blkptr_t *bp = &bps[i]; 1455 1456 ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs); 1457 BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ? 
1458 dn->dn_datablksz : BP_GET_LSIZE(dbbp)); 1459 BP_SET_TYPE(bp, BP_GET_TYPE(dbbp)); 1460 BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1); 1461 BP_SET_BIRTH(bp, BP_GET_LOGICAL_BIRTH(dbbp), 0); 1462 } 1463 } 1464 1465 /* 1466 * Handle reads on dbufs that are holes, if necessary. This function 1467 * requires that the dbuf's mutex is held. Returns success (0) if action 1468 * was taken, ENOENT if no action was taken. 1469 */ 1470 static int 1471 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp) 1472 { 1473 ASSERT(MUTEX_HELD(&db->db_mtx)); 1474 arc_buf_t *db_data; 1475 1476 int is_hole = bp == NULL || BP_IS_HOLE(bp); 1477 /* 1478 * For level 0 blocks only, if the above check fails: 1479 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 1480 * processes the delete record and clears the bp while we are waiting 1481 * for the dn_mtx (resulting in a "no" from block_freed). 1482 */ 1483 if (!is_hole && db->db_level == 0) 1484 is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp); 1485 1486 if (is_hole) { 1487 db_data = dbuf_alloc_arcbuf(db); 1488 memset(db_data->b_data, 0, db->db.db_size); 1489 1490 if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) && 1491 BP_GET_LOGICAL_BIRTH(bp) != 0) { 1492 dbuf_handle_indirect_hole(db_data->b_data, dn, bp); 1493 } 1494 dbuf_set_data(db, db_data); 1495 db->db_state = DB_CACHED; 1496 DTRACE_SET_STATE(db, "hole read satisfied"); 1497 return (0); 1498 } 1499 return (ENOENT); 1500 } 1501 1502 /* 1503 * This function ensures that, when doing a decrypting read of a block, 1504 * we make sure we have decrypted the dnode associated with it. We must do 1505 * this so that we ensure we are fully authenticating the checksum-of-MACs 1506 * tree from the root of the objset down to this block. Indirect blocks are 1507 * always verified against their secure checksum-of-MACs assuming that the 1508 * dnode containing them is correct. Now that we are doing a decrypting read, 1509 * we can be sure that the key is loaded and verify that assumption. This is 1510 * especially important considering that we always read encrypted dnode 1511 * blocks as raw data (without verifying their MACs) to start, and 1512 * decrypt / authenticate them when we need to read an encrypted bonus buffer. 1513 */ 1514 static int 1515 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, dnode_t *dn, 1516 dmu_flags_t flags) 1517 { 1518 objset_t *os = db->db_objset; 1519 dmu_buf_impl_t *dndb; 1520 arc_buf_t *dnbuf; 1521 zbookmark_phys_t zb; 1522 int err; 1523 1524 if ((flags & DMU_READ_NO_DECRYPT) != 0 || 1525 !os->os_encrypted || os->os_raw_receive || 1526 (dndb = dn->dn_dbuf) == NULL) 1527 return (0); 1528 1529 dnbuf = dndb->db_buf; 1530 if (!arc_is_encrypted(dnbuf)) 1531 return (0); 1532 1533 mutex_enter(&dndb->db_mtx); 1534 1535 /* 1536 * Since dnode buffer is modified by sync process, there can be only 1537 * one copy of it. It means we can not modify (decrypt) it while it 1538 * is being written. I don't see how this may happen now, since 1539 * encrypted dnode writes by receive should be completed before any 1540 * plain-text reads due to txg wait, but better be safe than sorry. 
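	 * The loop below therefore waits for any in-flight sync of the dnode
	 * buffer to finish before decrypting the buffer in place.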
1541 */ 1542 while (1) { 1543 if (!arc_is_encrypted(dnbuf)) { 1544 mutex_exit(&dndb->db_mtx); 1545 return (0); 1546 } 1547 dbuf_dirty_record_t *dr = dndb->db_data_pending; 1548 if (dr == NULL || dr->dt.dl.dr_data != dnbuf) 1549 break; 1550 cv_wait(&dndb->db_changed, &dndb->db_mtx); 1551 }; 1552 1553 SET_BOOKMARK(&zb, dmu_objset_id(os), 1554 DMU_META_DNODE_OBJECT, 0, dndb->db_blkid); 1555 err = arc_untransform(dnbuf, os->os_spa, &zb, B_TRUE); 1556 1557 /* 1558 * An error code of EACCES tells us that the key is still not 1559 * available. This is ok if we are only reading authenticated 1560 * (and therefore non-encrypted) blocks. 1561 */ 1562 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID && 1563 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) || 1564 (db->db_blkid == DMU_BONUS_BLKID && 1565 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)))) 1566 err = 0; 1567 1568 mutex_exit(&dndb->db_mtx); 1569 1570 return (err); 1571 } 1572 1573 /* 1574 * Drops db_mtx and the parent lock specified by dblt and tag before 1575 * returning. 1576 */ 1577 static int 1578 dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, dmu_flags_t flags, 1579 db_lock_type_t dblt, blkptr_t *bp, const void *tag) 1580 { 1581 zbookmark_phys_t zb; 1582 uint32_t aflags = ARC_FLAG_NOWAIT; 1583 int err, zio_flags; 1584 1585 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1586 ASSERT(MUTEX_HELD(&db->db_mtx)); 1587 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 1588 ASSERT(db->db_buf == NULL); 1589 ASSERT(db->db_parent == NULL || 1590 RW_LOCK_HELD(&db->db_parent->db_rwlock)); 1591 1592 if (db->db_blkid == DMU_BONUS_BLKID) { 1593 err = dbuf_read_bonus(db, dn); 1594 goto early_unlock; 1595 } 1596 1597 err = dbuf_read_hole(db, dn, bp); 1598 if (err == 0) 1599 goto early_unlock; 1600 1601 ASSERT(bp != NULL); 1602 1603 /* 1604 * Any attempt to read a redacted block should result in an error. This 1605 * will never happen under normal conditions, but can be useful for 1606 * debugging purposes. 1607 */ 1608 if (BP_IS_REDACTED(bp)) { 1609 ASSERT(dsl_dataset_feature_is_active( 1610 db->db_objset->os_dsl_dataset, 1611 SPA_FEATURE_REDACTED_DATASETS)); 1612 err = SET_ERROR(EIO); 1613 goto early_unlock; 1614 } 1615 1616 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1617 db->db.db_object, db->db_level, db->db_blkid); 1618 1619 /* 1620 * All bps of an encrypted os should have the encryption bit set. 1621 * If this is not true it indicates tampering and we report an error. 1622 */ 1623 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bp)) { 1624 spa_log_error(db->db_objset->os_spa, &zb, 1625 BP_GET_LOGICAL_BIRTH(bp)); 1626 err = SET_ERROR(EIO); 1627 goto early_unlock; 1628 } 1629 1630 db->db_state = DB_READ; 1631 DTRACE_SET_STATE(db, "read issued"); 1632 mutex_exit(&db->db_mtx); 1633 1634 if (!DBUF_IS_CACHEABLE(db)) 1635 aflags |= ARC_FLAG_UNCACHED; 1636 else if (dbuf_is_l2cacheable(db, bp)) 1637 aflags |= ARC_FLAG_L2CACHE; 1638 1639 dbuf_add_ref(db, NULL); 1640 1641 zio_flags = (flags & DB_RF_CANFAIL) ? 1642 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED; 1643 1644 if ((flags & DMU_READ_NO_DECRYPT) && BP_IS_PROTECTED(bp)) 1645 zio_flags |= ZIO_FLAG_RAW; 1646 1647 /* 1648 * The zio layer will copy the provided blkptr later, but we need to 1649 * do this now so that we can release the parent's rwlock. We have to 1650 * do that now so that if dbuf_read_done is called synchronously (on 1651 * an l1 cache hit) we don't acquire the db_mtx while holding the 1652 * parent's rwlock, which would be a lock ordering violation. 
	 */
	blkptr_t copy = *bp;
	dmu_buf_unlock_parent(db, dblt, tag);
	return (arc_read(zio, db->db_objset->os_spa, &copy,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb));

early_unlock:
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT3U(dr->dr_txg, >=, txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		dnode_t *dn = DB_DNODE(db);
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);
		uint8_t complevel = arc_get_complevel(db->db_buf);

		if (arc_is_encrypted(db->db_buf)) {
			boolean_t byteorder;
			uint8_t salt[ZIO_DATA_SALT_LEN];
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t mac[ZIO_DATA_MAC_LEN];

			arc_get_raw_params(db->db_buf, &byteorder, salt,
			    iv, mac);
			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
			    compress_type, complevel);
		} else if (compress_type != ZIO_COMPRESS_OFF) {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type,
			    complevel);
		} else {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		}
		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *pio, dmu_flags_t flags)
{
	dnode_t *dn;
	boolean_t miss = B_TRUE, need_wait = B_FALSE, prefetch;
	int err;

1751 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1752 1753 DB_DNODE_ENTER(db); 1754 dn = DB_DNODE(db); 1755 1756 /* 1757 * Ensure that this block's dnode has been decrypted if the caller 1758 * has requested decrypted data. 1759 */ 1760 err = dbuf_read_verify_dnode_crypt(db, dn, flags); 1761 if (err != 0) 1762 goto done; 1763 1764 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1765 (flags & DMU_READ_NO_PREFETCH) == 0; 1766 1767 mutex_enter(&db->db_mtx); 1768 if (!(flags & (DMU_UNCACHEDIO | DMU_KEEP_CACHING))) 1769 db->db_pending_evict = B_FALSE; 1770 if (flags & DMU_PARTIAL_FIRST) 1771 db->db_partial_read = B_TRUE; 1772 else if (!(flags & (DMU_PARTIAL_MORE | DMU_KEEP_CACHING))) 1773 db->db_partial_read = B_FALSE; 1774 miss = (db->db_state != DB_CACHED); 1775 1776 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1777 /* 1778 * Another reader came in while the dbuf was in flight between 1779 * UNCACHED and CACHED. Either a writer will finish filling 1780 * the buffer, sending the dbuf to CACHED, or the first reader's 1781 * request will reach the read_done callback and send the dbuf 1782 * to CACHED. Otherwise, a failure occurred and the dbuf will 1783 * be sent to UNCACHED. 1784 */ 1785 if (flags & DB_RF_NEVERWAIT) { 1786 mutex_exit(&db->db_mtx); 1787 DB_DNODE_EXIT(db); 1788 goto done; 1789 } 1790 do { 1791 ASSERT(db->db_state == DB_READ || 1792 (flags & DB_RF_HAVESTRUCT) == 0); 1793 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, db, 1794 zio_t *, pio); 1795 cv_wait(&db->db_changed, &db->db_mtx); 1796 } while (db->db_state == DB_READ || db->db_state == DB_FILL); 1797 if (db->db_state == DB_UNCACHED) { 1798 err = SET_ERROR(EIO); 1799 mutex_exit(&db->db_mtx); 1800 DB_DNODE_EXIT(db); 1801 goto done; 1802 } 1803 } 1804 1805 if (db->db_state == DB_CACHED) { 1806 /* 1807 * If the arc buf is compressed or encrypted and the caller 1808 * requested uncompressed data, we need to untransform it 1809 * before returning. We also call arc_untransform() on any 1810 * unauthenticated blocks, which will verify their MAC if 1811 * the key is now available. 1812 */ 1813 if ((flags & DMU_READ_NO_DECRYPT) == 0 && db->db_buf != NULL && 1814 (arc_is_encrypted(db->db_buf) || 1815 arc_is_unauthenticated(db->db_buf) || 1816 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { 1817 spa_t *spa = dn->dn_objset->os_spa; 1818 zbookmark_phys_t zb; 1819 1820 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1821 db->db.db_object, db->db_level, db->db_blkid); 1822 dbuf_fix_old_data(db, spa_syncing_txg(spa)); 1823 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); 1824 dbuf_set_data(db, db->db_buf); 1825 } 1826 mutex_exit(&db->db_mtx); 1827 } else { 1828 ASSERT(db->db_state == DB_UNCACHED || 1829 db->db_state == DB_NOFILL); 1830 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 1831 blkptr_t *bp; 1832 1833 /* 1834 * If a block clone or Direct I/O write has occurred we will 1835 * get the dirty records overridden BP so we get the most 1836 * recent data. 1837 */ 1838 err = dmu_buf_get_bp_from_dbuf(db, &bp); 1839 1840 if (!err) { 1841 if (pio == NULL && (db->db_state == DB_NOFILL || 1842 (bp != NULL && !BP_IS_HOLE(bp)))) { 1843 spa_t *spa = dn->dn_objset->os_spa; 1844 pio = 1845 zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1846 need_wait = B_TRUE; 1847 } 1848 1849 err = 1850 dbuf_read_impl(db, dn, pio, flags, dblt, bp, FTAG); 1851 } else { 1852 mutex_exit(&db->db_mtx); 1853 dmu_buf_unlock_parent(db, dblt, FTAG); 1854 } 1855 /* dbuf_read_impl drops db_mtx and parent's rwlock. 
*/ 1856 miss = (db->db_state != DB_CACHED); 1857 } 1858 1859 if (err == 0 && prefetch) { 1860 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, miss, 1861 flags & DB_RF_HAVESTRUCT, (flags & DMU_UNCACHEDIO) || 1862 db->db_pending_evict); 1863 } 1864 DB_DNODE_EXIT(db); 1865 1866 /* 1867 * If we created a zio we must execute it to avoid leaking it, even if 1868 * it isn't attached to any work due to an error in dbuf_read_impl(). 1869 */ 1870 if (need_wait) { 1871 if (err == 0) 1872 err = zio_wait(pio); 1873 else 1874 (void) zio_wait(pio); 1875 pio = NULL; 1876 } 1877 1878 done: 1879 if (miss) 1880 DBUF_STAT_BUMP(hash_misses); 1881 else 1882 DBUF_STAT_BUMP(hash_hits); 1883 if (pio && err != 0) { 1884 zio_t *zio = zio_null(pio, pio->io_spa, NULL, NULL, NULL, 1885 ZIO_FLAG_CANFAIL); 1886 zio->io_error = err; 1887 zio_nowait(zio); 1888 } 1889 1890 return (err); 1891 } 1892 1893 static void 1894 dbuf_noread(dmu_buf_impl_t *db, dmu_flags_t flags) 1895 { 1896 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1897 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1898 mutex_enter(&db->db_mtx); 1899 if (!(flags & (DMU_UNCACHEDIO | DMU_KEEP_CACHING))) 1900 db->db_pending_evict = B_FALSE; 1901 db->db_partial_read = B_FALSE; 1902 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1903 cv_wait(&db->db_changed, &db->db_mtx); 1904 if (db->db_state == DB_UNCACHED) { 1905 ASSERT(db->db_buf == NULL); 1906 ASSERT(db->db.db_data == NULL); 1907 dbuf_set_data(db, dbuf_alloc_arcbuf(db)); 1908 db->db_state = DB_FILL; 1909 DTRACE_SET_STATE(db, "assigning filled buffer"); 1910 } else if (db->db_state == DB_NOFILL) { 1911 dbuf_clear_data(db); 1912 } else { 1913 ASSERT3U(db->db_state, ==, DB_CACHED); 1914 } 1915 mutex_exit(&db->db_mtx); 1916 } 1917 1918 void 1919 dbuf_unoverride(dbuf_dirty_record_t *dr) 1920 { 1921 dmu_buf_impl_t *db = dr->dr_dbuf; 1922 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1923 uint64_t txg = dr->dr_txg; 1924 1925 ASSERT(MUTEX_HELD(&db->db_mtx)); 1926 1927 /* 1928 * This assert is valid because dmu_sync() expects to be called by 1929 * a zilog's get_data while holding a range lock. This call only 1930 * comes from dbuf_dirty() callers who must also hold a range lock. 1931 */ 1932 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1933 ASSERT(db->db_level == 0); 1934 1935 if (db->db_blkid == DMU_BONUS_BLKID || 1936 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1937 return; 1938 1939 ASSERT(db->db_data_pending != dr); 1940 1941 /* free this block */ 1942 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 1943 zio_free(db->db_objset->os_spa, txg, bp); 1944 1945 if (dr->dt.dl.dr_brtwrite || dr->dt.dl.dr_diowrite) { 1946 ASSERT0P(dr->dt.dl.dr_data); 1947 dr->dt.dl.dr_data = db->db_buf; 1948 } 1949 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1950 dr->dt.dl.dr_nopwrite = B_FALSE; 1951 dr->dt.dl.dr_brtwrite = B_FALSE; 1952 dr->dt.dl.dr_diowrite = B_FALSE; 1953 dr->dt.dl.dr_has_raw_params = B_FALSE; 1954 1955 /* 1956 * In the event that Direct I/O was used, we do not 1957 * need to release the buffer from the ARC. 1958 * 1959 * Release the already-written buffer, so we leave it in 1960 * a consistent dirty state. Note that all callers are 1961 * modifying the buffer, so they will immediately do 1962 * another (redundant) arc_release(). Therefore, leave 1963 * the buf thawed to save the effort of freezing & 1964 * immediately re-thawing it. 
1965 */ 1966 if (dr->dt.dl.dr_data) 1967 arc_release(dr->dt.dl.dr_data, db); 1968 } 1969 1970 /* 1971 * Evict (if its unreferenced) or clear (if its referenced) any level-0 1972 * data blocks in the free range, so that any future readers will find 1973 * empty blocks. 1974 */ 1975 void 1976 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 1977 dmu_tx_t *tx) 1978 { 1979 dmu_buf_impl_t *db_search; 1980 dmu_buf_impl_t *db, *db_next; 1981 uint64_t txg = tx->tx_txg; 1982 avl_index_t where; 1983 dbuf_dirty_record_t *dr; 1984 1985 if (end_blkid > dn->dn_maxblkid && 1986 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 1987 end_blkid = dn->dn_maxblkid; 1988 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid, 1989 (u_longlong_t)end_blkid); 1990 1991 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP); 1992 db_search->db_level = 0; 1993 db_search->db_blkid = start_blkid; 1994 db_search->db_state = DB_SEARCH; 1995 1996 mutex_enter(&dn->dn_dbufs_mtx); 1997 db = avl_find(&dn->dn_dbufs, db_search, &where); 1998 ASSERT3P(db, ==, NULL); 1999 2000 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 2001 2002 for (; db != NULL; db = db_next) { 2003 db_next = AVL_NEXT(&dn->dn_dbufs, db); 2004 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2005 2006 if (db->db_level != 0 || db->db_blkid > end_blkid) { 2007 break; 2008 } 2009 ASSERT3U(db->db_blkid, >=, start_blkid); 2010 2011 /* found a level 0 buffer in the range */ 2012 mutex_enter(&db->db_mtx); 2013 if (dbuf_undirty(db, tx)) { 2014 /* mutex has been dropped and dbuf destroyed */ 2015 continue; 2016 } 2017 2018 if (db->db_state == DB_UNCACHED || 2019 db->db_state == DB_NOFILL || 2020 db->db_state == DB_EVICTING) { 2021 ASSERT(db->db.db_data == NULL); 2022 mutex_exit(&db->db_mtx); 2023 continue; 2024 } 2025 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 2026 /* will be handled in dbuf_read_done or dbuf_rele */ 2027 db->db_freed_in_flight = TRUE; 2028 mutex_exit(&db->db_mtx); 2029 continue; 2030 } 2031 if (zfs_refcount_count(&db->db_holds) == 0) { 2032 ASSERT(db->db_buf); 2033 dbuf_destroy(db); 2034 continue; 2035 } 2036 /* The dbuf is referenced */ 2037 2038 dr = list_head(&db->db_dirty_records); 2039 if (dr != NULL) { 2040 if (dr->dr_txg == txg) { 2041 /* 2042 * This buffer is "in-use", re-adjust the file 2043 * size to reflect that this buffer may 2044 * contain new data when we sync. 2045 */ 2046 if (db->db_blkid != DMU_SPILL_BLKID && 2047 db->db_blkid > dn->dn_maxblkid) 2048 dn->dn_maxblkid = db->db_blkid; 2049 dbuf_unoverride(dr); 2050 } else { 2051 /* 2052 * This dbuf is not dirty in the open context. 2053 * Either uncache it (if its not referenced in 2054 * the open context) or reset its contents to 2055 * empty. 
2056 */ 2057 dbuf_fix_old_data(db, txg); 2058 } 2059 } 2060 /* clear the contents if its cached */ 2061 if (db->db_state == DB_CACHED) { 2062 ASSERT(db->db.db_data != NULL); 2063 arc_release(db->db_buf, db); 2064 rw_enter(&db->db_rwlock, RW_WRITER); 2065 memset(db->db.db_data, 0, db->db.db_size); 2066 rw_exit(&db->db_rwlock); 2067 arc_buf_freeze(db->db_buf); 2068 } 2069 2070 mutex_exit(&db->db_mtx); 2071 } 2072 2073 mutex_exit(&dn->dn_dbufs_mtx); 2074 kmem_free(db_search, sizeof (dmu_buf_impl_t)); 2075 } 2076 2077 void 2078 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 2079 { 2080 arc_buf_t *buf, *old_buf; 2081 dbuf_dirty_record_t *dr; 2082 int osize = db->db.db_size; 2083 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2084 dnode_t *dn; 2085 2086 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2087 2088 DB_DNODE_ENTER(db); 2089 dn = DB_DNODE(db); 2090 2091 /* 2092 * XXX we should be doing a dbuf_read, checking the return 2093 * value and returning that up to our callers 2094 */ 2095 dmu_buf_will_dirty(&db->db, tx); 2096 2097 VERIFY3P(db->db_buf, !=, NULL); 2098 2099 /* create the data buffer for the new block */ 2100 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 2101 2102 /* copy old block data to the new block */ 2103 old_buf = db->db_buf; 2104 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size)); 2105 /* zero the remainder */ 2106 if (size > osize) 2107 memset((uint8_t *)buf->b_data + osize, 0, size - osize); 2108 2109 mutex_enter(&db->db_mtx); 2110 dbuf_set_data(db, buf); 2111 arc_buf_destroy(old_buf, db); 2112 db->db.db_size = size; 2113 2114 dr = list_head(&db->db_dirty_records); 2115 /* dirty record added by dmu_buf_will_dirty() */ 2116 VERIFY(dr != NULL); 2117 if (db->db_level == 0) 2118 dr->dt.dl.dr_data = buf; 2119 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2120 ASSERT3U(dr->dr_accounted, ==, osize); 2121 dr->dr_accounted = size; 2122 mutex_exit(&db->db_mtx); 2123 2124 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 2125 DB_DNODE_EXIT(db); 2126 } 2127 2128 void 2129 dbuf_release_bp(dmu_buf_impl_t *db) 2130 { 2131 objset_t *os __maybe_unused = db->db_objset; 2132 2133 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 2134 ASSERT(arc_released(os->os_phys_buf) || 2135 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 2136 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 2137 2138 (void) arc_release(db->db_buf, db); 2139 } 2140 2141 /* 2142 * We already have a dirty record for this TXG, and we are being 2143 * dirtied again. 2144 */ 2145 static void 2146 dbuf_redirty(dbuf_dirty_record_t *dr) 2147 { 2148 dmu_buf_impl_t *db = dr->dr_dbuf; 2149 2150 ASSERT(MUTEX_HELD(&db->db_mtx)); 2151 2152 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 2153 /* 2154 * If this buffer has already been written out, 2155 * we now need to reset its state. 2156 */ 2157 dbuf_unoverride(dr); 2158 if (db->db.db_object != DMU_META_DNODE_OBJECT && 2159 db->db_state != DB_NOFILL) { 2160 /* Already released on initial dirty, so just thaw. 
*/ 2161 ASSERT(arc_released(db->db_buf)); 2162 arc_buf_thaw(db->db_buf); 2163 } 2164 } 2165 } 2166 2167 dbuf_dirty_record_t * 2168 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) 2169 { 2170 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2171 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid); 2172 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE); 2173 ASSERT(dn->dn_maxblkid >= blkid); 2174 2175 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP); 2176 list_link_init(&dr->dr_dirty_node); 2177 list_link_init(&dr->dr_dbuf_node); 2178 dr->dr_dnode = dn; 2179 dr->dr_txg = tx->tx_txg; 2180 dr->dt.dll.dr_blkid = blkid; 2181 dr->dr_accounted = dn->dn_datablksz; 2182 2183 /* 2184 * There should not be any dbuf for the block that we're dirtying. 2185 * Otherwise the buffer contents could be inconsistent between the 2186 * dbuf and the lightweight dirty record. 2187 */ 2188 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid, 2189 NULL)); 2190 2191 mutex_enter(&dn->dn_mtx); 2192 int txgoff = tx->tx_txg & TXG_MASK; 2193 if (dn->dn_free_ranges[txgoff] != NULL) { 2194 zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1); 2195 } 2196 2197 if (dn->dn_nlevels == 1) { 2198 ASSERT3U(blkid, <, dn->dn_nblkptr); 2199 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2200 mutex_exit(&dn->dn_mtx); 2201 rw_exit(&dn->dn_struct_rwlock); 2202 dnode_setdirty(dn, tx); 2203 } else { 2204 mutex_exit(&dn->dn_mtx); 2205 2206 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2207 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn, 2208 1, blkid >> epbs, FTAG); 2209 rw_exit(&dn->dn_struct_rwlock); 2210 if (parent_db == NULL) { 2211 kmem_free(dr, sizeof (*dr)); 2212 return (NULL); 2213 } 2214 int err = dbuf_read(parent_db, NULL, DB_RF_CANFAIL | 2215 DMU_READ_NO_PREFETCH); 2216 if (err != 0) { 2217 dbuf_rele(parent_db, FTAG); 2218 kmem_free(dr, sizeof (*dr)); 2219 return (NULL); 2220 } 2221 2222 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx); 2223 dbuf_rele(parent_db, FTAG); 2224 mutex_enter(&parent_dr->dt.di.dr_mtx); 2225 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg); 2226 list_insert_tail(&parent_dr->dt.di.dr_children, dr); 2227 mutex_exit(&parent_dr->dt.di.dr_mtx); 2228 dr->dr_parent = parent_dr; 2229 } 2230 2231 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx); 2232 2233 return (dr); 2234 } 2235 2236 dbuf_dirty_record_t * 2237 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2238 { 2239 dnode_t *dn; 2240 objset_t *os; 2241 dbuf_dirty_record_t *dr, *dr_next, *dr_head; 2242 int txgoff = tx->tx_txg & TXG_MASK; 2243 boolean_t drop_struct_rwlock = B_FALSE; 2244 2245 ASSERT(tx->tx_txg != 0); 2246 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2247 DMU_TX_DIRTY_BUF(tx, db); 2248 2249 DB_DNODE_ENTER(db); 2250 dn = DB_DNODE(db); 2251 /* 2252 * Shouldn't dirty a regular buffer in syncing context. Private 2253 * objects may be dirtied in syncing context, but only if they 2254 * were already pre-dirtied in open context. 
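 *
 * (The "private" objects referred to here are the DMU_OBJECT_IS_SPECIAL()
 * ones, e.g. the meta-dnode and the {user|group|project}used accounting
 * objects; see the assertions below.)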
2255 */ 2256 #ifdef ZFS_DEBUG 2257 if (dn->dn_objset->os_dsl_dataset != NULL) { 2258 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 2259 RW_READER, FTAG); 2260 } 2261 ASSERT(!dmu_tx_is_syncing(tx) || 2262 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 2263 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2264 dn->dn_objset->os_dsl_dataset == NULL); 2265 if (dn->dn_objset->os_dsl_dataset != NULL) 2266 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 2267 #endif 2268 /* 2269 * We make this assert for private objects as well, but after we 2270 * check if we're already dirty. They are allowed to re-dirty 2271 * in syncing context. 2272 */ 2273 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2274 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2275 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2276 2277 mutex_enter(&db->db_mtx); 2278 /* 2279 * XXX make this true for indirects too? The problem is that 2280 * transactions created with dmu_tx_create_assigned() from 2281 * syncing context don't bother holding ahead. 2282 */ 2283 ASSERT(db->db_level != 0 || 2284 db->db_state == DB_CACHED || db->db_state == DB_FILL || 2285 db->db_state == DB_NOFILL); 2286 2287 mutex_enter(&dn->dn_mtx); 2288 dnode_set_dirtyctx(dn, tx, db); 2289 if (tx->tx_txg > dn->dn_dirty_txg) 2290 dn->dn_dirty_txg = tx->tx_txg; 2291 mutex_exit(&dn->dn_mtx); 2292 2293 if (db->db_blkid == DMU_SPILL_BLKID) 2294 dn->dn_have_spill = B_TRUE; 2295 2296 /* 2297 * If this buffer is already dirty, we're done. 2298 */ 2299 dr_head = list_head(&db->db_dirty_records); 2300 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg || 2301 db->db.db_object == DMU_META_DNODE_OBJECT); 2302 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg); 2303 if (dr_next && dr_next->dr_txg == tx->tx_txg) { 2304 DB_DNODE_EXIT(db); 2305 2306 dbuf_redirty(dr_next); 2307 mutex_exit(&db->db_mtx); 2308 return (dr_next); 2309 } 2310 2311 /* 2312 * Only valid if not already dirty. 2313 */ 2314 ASSERT(dn->dn_object == 0 || 2315 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 2316 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 2317 2318 ASSERT3U(dn->dn_nlevels, >, db->db_level); 2319 2320 /* 2321 * We should only be dirtying in syncing context if it's the 2322 * mos or we're initializing the os or it's a special object. 2323 * However, we are allowed to dirty in syncing context provided 2324 * we already dirtied it in open context. Hence we must make 2325 * this assertion only if we're not already dirty. 2326 */ 2327 os = dn->dn_objset; 2328 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 2329 #ifdef ZFS_DEBUG 2330 if (dn->dn_objset->os_dsl_dataset != NULL) 2331 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 2332 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 2333 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 2334 if (dn->dn_objset->os_dsl_dataset != NULL) 2335 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 2336 #endif 2337 ASSERT(db->db.db_size != 0); 2338 2339 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2340 2341 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) { 2342 dmu_objset_willuse_space(os, db->db.db_size, tx); 2343 } 2344 2345 /* 2346 * If this buffer is dirty in an old transaction group we need 2347 * to make a copy of it so that the changes we make in this 2348 * transaction group won't leak out when we sync the older txg. 
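 *
 * For level-0 buffers that copy is made by the dbuf_fix_old_data()
 * calls just below; indirect buffers instead get a fresh dr_children
 * list on the new dirty record, since they are only modified in
 * syncing context.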
2349 */ 2350 dr = kmem_cache_alloc(dbuf_dirty_kmem_cache, KM_SLEEP); 2351 memset(dr, 0, sizeof (*dr)); 2352 list_link_init(&dr->dr_dirty_node); 2353 list_link_init(&dr->dr_dbuf_node); 2354 dr->dr_dnode = dn; 2355 if (db->db_level == 0) { 2356 void *data_old = db->db_buf; 2357 2358 if (db->db_state != DB_NOFILL) { 2359 if (db->db_blkid == DMU_BONUS_BLKID) { 2360 dbuf_fix_old_data(db, tx->tx_txg); 2361 data_old = db->db.db_data; 2362 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 2363 /* 2364 * Release the data buffer from the cache so 2365 * that we can modify it without impacting 2366 * possible other users of this cached data 2367 * block. Note that indirect blocks and 2368 * private objects are not released until the 2369 * syncing state (since they are only modified 2370 * then). 2371 */ 2372 arc_release(db->db_buf, db); 2373 dbuf_fix_old_data(db, tx->tx_txg); 2374 data_old = db->db_buf; 2375 } 2376 ASSERT(data_old != NULL); 2377 } 2378 dr->dt.dl.dr_data = data_old; 2379 } else { 2380 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL); 2381 list_create(&dr->dt.di.dr_children, 2382 sizeof (dbuf_dirty_record_t), 2383 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 2384 } 2385 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) { 2386 dr->dr_accounted = db->db.db_size; 2387 } 2388 dr->dr_dbuf = db; 2389 dr->dr_txg = tx->tx_txg; 2390 list_insert_before(&db->db_dirty_records, dr_next, dr); 2391 2392 /* 2393 * We could have been freed_in_flight between the dbuf_noread 2394 * and dbuf_dirty. We win, as though the dbuf_noread() had 2395 * happened after the free. 2396 */ 2397 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2398 db->db_blkid != DMU_SPILL_BLKID) { 2399 mutex_enter(&dn->dn_mtx); 2400 if (dn->dn_free_ranges[txgoff] != NULL) { 2401 zfs_range_tree_clear(dn->dn_free_ranges[txgoff], 2402 db->db_blkid, 1); 2403 } 2404 mutex_exit(&dn->dn_mtx); 2405 db->db_freed_in_flight = FALSE; 2406 } 2407 2408 /* 2409 * This buffer is now part of this txg 2410 */ 2411 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 2412 db->db_dirtycnt += 1; 2413 ASSERT3U(db->db_dirtycnt, <=, 3); 2414 2415 mutex_exit(&db->db_mtx); 2416 2417 if (db->db_blkid == DMU_BONUS_BLKID || 2418 db->db_blkid == DMU_SPILL_BLKID) { 2419 mutex_enter(&dn->dn_mtx); 2420 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2421 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2422 mutex_exit(&dn->dn_mtx); 2423 dnode_setdirty(dn, tx); 2424 DB_DNODE_EXIT(db); 2425 return (dr); 2426 } 2427 2428 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 2429 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2430 drop_struct_rwlock = B_TRUE; 2431 } 2432 2433 /* 2434 * If we are overwriting a dedup BP, then unless it is snapshotted, 2435 * when we get to syncing context we will need to decrement its 2436 * refcount in the DDT. Prefetch the relevant DDT block so that 2437 * syncing context won't have to wait for the i/o. 2438 */ 2439 if (db->db_blkptr != NULL) { 2440 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG); 2441 ddt_prefetch(os->os_spa, db->db_blkptr); 2442 dmu_buf_unlock_parent(db, dblt, FTAG); 2443 } 2444 2445 /* 2446 * We need to hold the dn_struct_rwlock to make this assertion, 2447 * because it protects dn_phys / dn_next_nlevels from changing. 
2448 */ 2449 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 2450 dn->dn_phys->dn_nlevels > db->db_level || 2451 dn->dn_next_nlevels[txgoff] > db->db_level || 2452 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 2453 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 2454 2455 2456 if (db->db_level == 0) { 2457 ASSERT(!db->db_objset->os_raw_receive || 2458 dn->dn_maxblkid >= db->db_blkid); 2459 dnode_new_blkid(dn, db->db_blkid, tx, 2460 drop_struct_rwlock, B_FALSE); 2461 ASSERT(dn->dn_maxblkid >= db->db_blkid); 2462 } 2463 2464 if (db->db_level+1 < dn->dn_nlevels) { 2465 dmu_buf_impl_t *parent = db->db_parent; 2466 dbuf_dirty_record_t *di; 2467 int parent_held = FALSE; 2468 2469 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 2470 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2471 parent = dbuf_hold_level(dn, db->db_level + 1, 2472 db->db_blkid >> epbs, FTAG); 2473 ASSERT(parent != NULL); 2474 parent_held = TRUE; 2475 } 2476 if (drop_struct_rwlock) 2477 rw_exit(&dn->dn_struct_rwlock); 2478 ASSERT3U(db->db_level + 1, ==, parent->db_level); 2479 di = dbuf_dirty(parent, tx); 2480 if (parent_held) 2481 dbuf_rele(parent, FTAG); 2482 2483 mutex_enter(&db->db_mtx); 2484 /* 2485 * Since we've dropped the mutex, it's possible that 2486 * dbuf_undirty() might have changed this out from under us. 2487 */ 2488 if (list_head(&db->db_dirty_records) == dr || 2489 dn->dn_object == DMU_META_DNODE_OBJECT) { 2490 mutex_enter(&di->dt.di.dr_mtx); 2491 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 2492 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2493 list_insert_tail(&di->dt.di.dr_children, dr); 2494 mutex_exit(&di->dt.di.dr_mtx); 2495 dr->dr_parent = di; 2496 } 2497 mutex_exit(&db->db_mtx); 2498 } else { 2499 ASSERT(db->db_level + 1 == dn->dn_nlevels); 2500 ASSERT(db->db_blkid < dn->dn_nblkptr); 2501 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 2502 mutex_enter(&dn->dn_mtx); 2503 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2504 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 2505 mutex_exit(&dn->dn_mtx); 2506 if (drop_struct_rwlock) 2507 rw_exit(&dn->dn_struct_rwlock); 2508 } 2509 2510 dnode_setdirty(dn, tx); 2511 DB_DNODE_EXIT(db); 2512 return (dr); 2513 } 2514 2515 static void 2516 dbuf_undirty_bonus(dbuf_dirty_record_t *dr) 2517 { 2518 dmu_buf_impl_t *db = dr->dr_dbuf; 2519 2520 ASSERT(MUTEX_HELD(&db->db_mtx)); 2521 if (dr->dt.dl.dr_data != db->db.db_data) { 2522 struct dnode *dn = dr->dr_dnode; 2523 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 2524 2525 kmem_free(dr->dt.dl.dr_data, max_bonuslen); 2526 arc_space_return(max_bonuslen, ARC_SPACE_BONUS); 2527 } 2528 db->db_data_pending = NULL; 2529 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 2530 list_remove(&db->db_dirty_records, dr); 2531 if (dr->dr_dbuf->db_level != 0) { 2532 mutex_destroy(&dr->dt.di.dr_mtx); 2533 list_destroy(&dr->dt.di.dr_children); 2534 } 2535 kmem_cache_free(dbuf_dirty_kmem_cache, dr); 2536 ASSERT3U(db->db_dirtycnt, >, 0); 2537 db->db_dirtycnt -= 1; 2538 } 2539 2540 /* 2541 * Undirty a buffer in the transaction group referenced by the given 2542 * transaction. Return whether this evicted the dbuf. 2543 */ 2544 boolean_t 2545 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 2546 { 2547 uint64_t txg = tx->tx_txg; 2548 boolean_t brtwrite; 2549 boolean_t diowrite; 2550 2551 ASSERT(txg != 0); 2552 2553 /* 2554 * Due to our use of dn_nlevels below, this can only be called 2555 * in open context, unless we are operating on the MOS. 
2556 * From syncing context, dn_nlevels may be different from the 2557 * dn_nlevels used when dbuf was dirtied. 2558 */ 2559 ASSERT(db->db_objset == 2560 dmu_objset_pool(db->db_objset)->dp_meta_objset || 2561 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 2562 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2563 ASSERT0(db->db_level); 2564 ASSERT(MUTEX_HELD(&db->db_mtx)); 2565 2566 /* 2567 * If this buffer is not dirty, we're done. 2568 */ 2569 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg); 2570 if (dr == NULL) 2571 return (B_FALSE); 2572 ASSERT(dr->dr_dbuf == db); 2573 2574 brtwrite = dr->dt.dl.dr_brtwrite; 2575 diowrite = dr->dt.dl.dr_diowrite; 2576 if (brtwrite) { 2577 ASSERT3B(diowrite, ==, B_FALSE); 2578 /* 2579 * We are freeing a block that we cloned in the same 2580 * transaction group. 2581 */ 2582 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 2583 if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) { 2584 brt_pending_remove(dmu_objset_spa(db->db_objset), 2585 bp, tx); 2586 } 2587 } 2588 2589 dnode_t *dn = dr->dr_dnode; 2590 2591 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 2592 2593 ASSERT(db->db.db_size != 0); 2594 2595 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 2596 dr->dr_accounted, txg); 2597 2598 list_remove(&db->db_dirty_records, dr); 2599 2600 /* 2601 * Note that there are three places in dbuf_dirty() 2602 * where this dirty record may be put on a list. 2603 * Make sure to do a list_remove corresponding to 2604 * every one of those list_insert calls. 2605 */ 2606 if (dr->dr_parent) { 2607 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 2608 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 2609 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 2610 } else if (db->db_blkid == DMU_SPILL_BLKID || 2611 db->db_level + 1 == dn->dn_nlevels) { 2612 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 2613 mutex_enter(&dn->dn_mtx); 2614 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 2615 mutex_exit(&dn->dn_mtx); 2616 } 2617 2618 if (db->db_state != DB_NOFILL && !brtwrite) { 2619 dbuf_unoverride(dr); 2620 2621 if (dr->dt.dl.dr_data != db->db_buf) { 2622 ASSERT(db->db_buf != NULL); 2623 ASSERT(dr->dt.dl.dr_data != NULL); 2624 arc_buf_destroy(dr->dt.dl.dr_data, db); 2625 } 2626 } 2627 2628 kmem_cache_free(dbuf_dirty_kmem_cache, dr); 2629 2630 ASSERT(db->db_dirtycnt > 0); 2631 db->db_dirtycnt -= 1; 2632 2633 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2634 ASSERT(db->db_state == DB_NOFILL || brtwrite || diowrite || 2635 arc_released(db->db_buf)); 2636 dbuf_destroy(db); 2637 return (B_TRUE); 2638 } 2639 2640 return (B_FALSE); 2641 } 2642 2643 void 2644 dmu_buf_will_dirty_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, dmu_flags_t flags) 2645 { 2646 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2647 boolean_t undirty = B_FALSE; 2648 2649 ASSERT(tx->tx_txg != 0); 2650 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2651 2652 /* 2653 * Quick check for dirtiness to improve performance for some workloads 2654 * (e.g. file deletion with indirect blocks cached). 2655 */ 2656 mutex_enter(&db->db_mtx); 2657 if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) { 2658 /* 2659 * It's possible that the dbuf is already dirty but not cached, 2660 * because there are some calls to dbuf_dirty() that don't 2661 * go through dmu_buf_will_dirty(). 
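 *
 * (For example, the NOFILL and clone/Direct I/O paths later in this
 * file call dbuf_noread() and then dbuf_dirty() directly.)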
2662 */
2663 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2664 if (dr != NULL) {
2665 if (db->db_level == 0 &&
2666 dr->dt.dl.dr_brtwrite) {
2667 /*
2668 * Block cloning: If we are dirtying a cloned
2669 * level 0 block, we cannot simply redirty it,
2670 * because this dr has no associated data.
2671 * We will go through a full undirtying below,
2672 * before dirtying it again.
2673 */
2674 undirty = B_TRUE;
2675 } else {
2676 /* This dbuf is already dirty and cached. */
2677 dbuf_redirty(dr);
2678 mutex_exit(&db->db_mtx);
2679 return;
2680 }
2681 }
2682 }
2683 mutex_exit(&db->db_mtx);
2684
2685 DB_DNODE_ENTER(db);
2686 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2687 flags |= DB_RF_HAVESTRUCT;
2688 DB_DNODE_EXIT(db);
2689
2690 /*
2691 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2692 * want to make sure dbuf_read() will read the pending cloned block and
2693 * not the underlying block that is being replaced. dbuf_undirty() will
2694 * do brt_pending_remove() before removing the dirty record.
2695 */
2696 (void) dbuf_read(db, NULL, flags | DB_RF_MUST_SUCCEED);
2697 if (undirty) {
2698 mutex_enter(&db->db_mtx);
2699 VERIFY(!dbuf_undirty(db, tx));
2700 mutex_exit(&db->db_mtx);
2701 }
2702 (void) dbuf_dirty(db, tx);
2703 }
2704
2705 void
2706 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2707 {
2708 dmu_buf_will_dirty_flags(db_fake, tx, DMU_READ_NO_PREFETCH);
2709 }
2710
2711 boolean_t
2712 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2713 {
2714 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2715 dbuf_dirty_record_t *dr;
2716
2717 mutex_enter(&db->db_mtx);
2718 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2719 mutex_exit(&db->db_mtx);
2720 return (dr != NULL);
2721 }
2722
2723 /*
2724 * Normally the db_blkptr points to the most recent on-disk content for the
2725 * dbuf (and anything newer will be cached in the dbuf). However, a pending
2726 * block clone or not yet synced Direct I/O write will have a dirty record BP
2727 * pointing to the most recent data.
2728 */
2729 int
2730 dmu_buf_get_bp_from_dbuf(dmu_buf_impl_t *db, blkptr_t **bp)
2731 {
2732 ASSERT(MUTEX_HELD(&db->db_mtx));
2733 int error = 0;
2734
2735 if (db->db_level != 0) {
2736 *bp = db->db_blkptr;
2737 return (0);
2738 }
2739
2740 *bp = db->db_blkptr;
2741 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2742 if (dr && db->db_state == DB_NOFILL) {
2743 /* Block clone */
2744 if (!dr->dt.dl.dr_brtwrite)
2745 error = EIO;
2746 else
2747 *bp = &dr->dt.dl.dr_overridden_by;
2748 } else if (dr && db->db_state == DB_UNCACHED) {
2749 /* Direct I/O write */
2750 if (dr->dt.dl.dr_diowrite)
2751 *bp = &dr->dt.dl.dr_overridden_by;
2752 }
2753
2754 return (error);
2755 }
2756
2757 /*
2758 * Direct I/O reads can read directly from the ARC, but the data has
2759 * to be untransformed in order to copy it over into user pages.
2760 */
2761 int
2762 dmu_buf_untransform_direct(dmu_buf_impl_t *db, spa_t *spa)
2763 {
2764 int err = 0;
2765 DB_DNODE_ENTER(db);
2766 dnode_t *dn = DB_DNODE(db);
2767
2768 ASSERT3S(db->db_state, ==, DB_CACHED);
2769 ASSERT(MUTEX_HELD(&db->db_mtx));
2770
2771 /*
2772 * Ensure that this block's dnode has been decrypted if
2773 * the caller has requested decrypted data.
2774 */
2775 err = dbuf_read_verify_dnode_crypt(db, dn, 0);
2776
2777 /*
2778 * If the arc buf is compressed or encrypted and the caller
2779 * requested uncompressed data, we need to untransform it
2780 * before returning. We also call arc_untransform() on any
2781 * unauthenticated blocks, which will verify their MAC if
2782 * the key is now available.
2783 */
2784 if (err == 0 && db->db_buf != NULL &&
2785 (arc_is_encrypted(db->db_buf) ||
2786 arc_is_unauthenticated(db->db_buf) ||
2787 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
2788 zbookmark_phys_t zb;
2789
2790 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
2791 db->db.db_object, db->db_level, db->db_blkid);
2792 dbuf_fix_old_data(db, spa_syncing_txg(spa));
2793 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
2794 dbuf_set_data(db, db->db_buf);
2795 }
2796 DB_DNODE_EXIT(db);
2797 DBUF_STAT_BUMP(hash_hits);
2798
2799 return (err);
2800 }
2801
2802 void
2803 dmu_buf_will_clone_or_dio(dmu_buf_t *db_fake, dmu_tx_t *tx)
2804 {
2805 /*
2806 * Block clones and Direct I/O writes always happen in open-context.
2807 */
2808 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2809 ASSERT0(db->db_level);
2810 ASSERT(!dmu_tx_is_syncing(tx));
2811
2812 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2813 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
2814
2815 mutex_enter(&db->db_mtx);
2816 DBUF_VERIFY(db);
2817
2818 /*
2819 * We are going to clone or issue a Direct I/O write on this block, so
2820 * undirty modifications done to this block so far in this txg. This
2821 * includes writes and clones into this block.
2822 *
2823 * If there is a dirty record associated with this txg from a previous
2824 * Direct I/O write, then space accounting cleanup takes place. It is
2825 * important to go ahead and free up the space accounting through
2826 * dbuf_undirty() -> dbuf_unoverride() -> zio_free(). Space accounting
2827 * for determining if a write can occur in zfs_write() happens through
2828 * dmu_tx_assign(). This can cause an issue with Direct I/O writes in
2829 * the case of overwriting the same block, because all DVA allocations
2830 * are being done in open-context. Constantly allowing Direct I/O
2831 * overwrites to the same block can exhaust the pool's available space,
2832 * leading to ENOSPC errors at the DVA allocation part of the ZIO
2833 * pipeline, which will eventually suspend the pool. By cleaning up
2834 * space accounting now, the ENOSPC error can be avoided.
2835 *
2836 * Since we are undirtying the record in open-context, we must have a
2837 * hold on the db, so it should never be evicted after calling
2838 * dbuf_undirty().
2839 */
2840 VERIFY3B(dbuf_undirty(db, tx), ==, B_FALSE);
2841 ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
2842
2843 if (db->db_buf != NULL) {
2844 /*
2845 * If there is an ARC buffer associated with this dbuf, we can
2846 * only destroy it if the previous dirty record does not
2847 * reference it.
2848 */
2849 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2850 if (dr == NULL || dr->dt.dl.dr_data != db->db_buf)
2851 arc_buf_destroy(db->db_buf, db);
2852
2853 /*
2854 * Setting the dbuf's data pointers to NULL will force all
2855 * future reads down to the devices to get the most up to date
2856 * version of the data after a Direct I/O write has completed.
2857 */ 2858 db->db_buf = NULL; 2859 dbuf_clear_data(db); 2860 } 2861 2862 ASSERT3P(db->db_buf, ==, NULL); 2863 ASSERT3P(db->db.db_data, ==, NULL); 2864 2865 db->db_state = DB_NOFILL; 2866 DTRACE_SET_STATE(db, 2867 "allocating NOFILL buffer for clone or direct I/O write"); 2868 2869 DBUF_VERIFY(db); 2870 mutex_exit(&db->db_mtx); 2871 2872 dbuf_noread(db, DMU_KEEP_CACHING); 2873 (void) dbuf_dirty(db, tx); 2874 } 2875 2876 void 2877 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2878 { 2879 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2880 2881 mutex_enter(&db->db_mtx); 2882 db->db_state = DB_NOFILL; 2883 DTRACE_SET_STATE(db, "allocating NOFILL buffer"); 2884 mutex_exit(&db->db_mtx); 2885 2886 dbuf_noread(db, DMU_KEEP_CACHING); 2887 (void) dbuf_dirty(db, tx); 2888 } 2889 2890 void 2891 dmu_buf_will_fill_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail, 2892 dmu_flags_t flags) 2893 { 2894 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2895 2896 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2897 ASSERT(tx->tx_txg != 0); 2898 ASSERT(db->db_level == 0); 2899 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2900 2901 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2902 dmu_tx_private_ok(tx)); 2903 2904 mutex_enter(&db->db_mtx); 2905 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2906 if (db->db_state == DB_NOFILL || 2907 (db->db_state == DB_UNCACHED && dr && dr->dt.dl.dr_diowrite)) { 2908 /* 2909 * If the fill can fail we should have a way to return back to 2910 * the cloned or Direct I/O write data. 2911 */ 2912 if (canfail && dr) { 2913 mutex_exit(&db->db_mtx); 2914 dmu_buf_will_dirty_flags(db_fake, tx, flags); 2915 return; 2916 } 2917 /* 2918 * Block cloning: We will be completely overwriting a block 2919 * cloned in this transaction group, so let's undirty the 2920 * pending clone and mark the block as uncached. This will be 2921 * as if the clone was never done. 2922 */ 2923 if (db->db_state == DB_NOFILL) { 2924 VERIFY(!dbuf_undirty(db, tx)); 2925 db->db_state = DB_UNCACHED; 2926 } 2927 } 2928 mutex_exit(&db->db_mtx); 2929 2930 dbuf_noread(db, flags); 2931 (void) dbuf_dirty(db, tx); 2932 } 2933 2934 void 2935 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail) 2936 { 2937 dmu_buf_will_fill_flags(db_fake, tx, canfail, DMU_READ_NO_PREFETCH); 2938 } 2939 2940 /* 2941 * This function is effectively the same as dmu_buf_will_dirty(), but 2942 * indicates the caller expects raw encrypted data in the db, and provides 2943 * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2944 * blkptr_t when this dbuf is written. This is only used for blocks of 2945 * dnodes, during raw receive. 2946 */ 2947 void 2948 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2949 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2950 { 2951 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2952 dbuf_dirty_record_t *dr; 2953 2954 /* 2955 * dr_has_raw_params is only processed for blocks of dnodes 2956 * (see dbuf_sync_dnode_leaf_crypt()). 
2957 */ 2958 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2959 ASSERT0(db->db_level); 2960 ASSERT(db->db_objset->os_raw_receive); 2961 2962 dmu_buf_will_dirty_flags(db_fake, tx, 2963 DMU_READ_NO_PREFETCH | DMU_READ_NO_DECRYPT); 2964 2965 dr = dbuf_find_dirty_eq(db, tx->tx_txg); 2966 2967 ASSERT3P(dr, !=, NULL); 2968 ASSERT3U(dr->dt.dl.dr_override_state, ==, DR_NOT_OVERRIDDEN); 2969 2970 dr->dt.dl.dr_has_raw_params = B_TRUE; 2971 dr->dt.dl.dr_byteorder = byteorder; 2972 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN); 2973 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN); 2974 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN); 2975 } 2976 2977 static void 2978 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx) 2979 { 2980 struct dirty_leaf *dl; 2981 dbuf_dirty_record_t *dr; 2982 2983 ASSERT3U(db->db.db_object, !=, DMU_META_DNODE_OBJECT); 2984 ASSERT0(db->db_level); 2985 2986 dr = list_head(&db->db_dirty_records); 2987 ASSERT3P(dr, !=, NULL); 2988 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2989 dl = &dr->dt.dl; 2990 ASSERT0(dl->dr_has_raw_params); 2991 dl->dr_overridden_by = *bp; 2992 dl->dr_override_state = DR_OVERRIDDEN; 2993 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg); 2994 } 2995 2996 boolean_t 2997 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed) 2998 { 2999 (void) tx; 3000 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 3001 mutex_enter(&db->db_mtx); 3002 DBUF_VERIFY(db); 3003 3004 if (db->db_state == DB_FILL) { 3005 if (db->db_level == 0 && db->db_freed_in_flight) { 3006 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3007 /* we were freed while filling */ 3008 /* XXX dbuf_undirty? */ 3009 memset(db->db.db_data, 0, db->db.db_size); 3010 db->db_freed_in_flight = FALSE; 3011 db->db_state = DB_CACHED; 3012 DTRACE_SET_STATE(db, 3013 "fill done handling freed in flight"); 3014 failed = B_FALSE; 3015 } else if (failed) { 3016 VERIFY(!dbuf_undirty(db, tx)); 3017 arc_buf_destroy(db->db_buf, db); 3018 db->db_buf = NULL; 3019 dbuf_clear_data(db); 3020 DTRACE_SET_STATE(db, "fill failed"); 3021 } else { 3022 db->db_state = DB_CACHED; 3023 DTRACE_SET_STATE(db, "fill done"); 3024 } 3025 cv_broadcast(&db->db_changed); 3026 } else { 3027 db->db_state = DB_CACHED; 3028 failed = B_FALSE; 3029 } 3030 mutex_exit(&db->db_mtx); 3031 return (failed); 3032 } 3033 3034 void 3035 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 3036 bp_embedded_type_t etype, enum zio_compress comp, 3037 int uncompressed_size, int compressed_size, int byteorder, 3038 dmu_tx_t *tx) 3039 { 3040 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 3041 struct dirty_leaf *dl; 3042 dmu_object_type_t type; 3043 dbuf_dirty_record_t *dr; 3044 3045 if (etype == BP_EMBEDDED_TYPE_DATA) { 3046 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 3047 SPA_FEATURE_EMBEDDED_DATA)); 3048 } 3049 3050 DB_DNODE_ENTER(db); 3051 type = DB_DNODE(db)->dn_type; 3052 DB_DNODE_EXIT(db); 3053 3054 ASSERT0(db->db_level); 3055 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3056 3057 dmu_buf_will_not_fill(dbuf, tx); 3058 3059 dr = list_head(&db->db_dirty_records); 3060 ASSERT3P(dr, !=, NULL); 3061 ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 3062 dl = &dr->dt.dl; 3063 ASSERT0(dl->dr_has_raw_params); 3064 encode_embedded_bp_compressed(&dl->dr_overridden_by, 3065 data, comp, uncompressed_size, compressed_size); 3066 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 3067 BP_SET_TYPE(&dl->dr_overridden_by, type); 3068 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 3069 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 3070 3071 
dl->dr_override_state = DR_OVERRIDDEN; 3072 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg); 3073 } 3074 3075 void 3076 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx) 3077 { 3078 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 3079 dmu_object_type_t type; 3080 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset, 3081 SPA_FEATURE_REDACTED_DATASETS)); 3082 3083 DB_DNODE_ENTER(db); 3084 type = DB_DNODE(db)->dn_type; 3085 DB_DNODE_EXIT(db); 3086 3087 ASSERT0(db->db_level); 3088 dmu_buf_will_not_fill(dbuf, tx); 3089 3090 blkptr_t bp = { { { {0} } } }; 3091 BP_SET_TYPE(&bp, type); 3092 BP_SET_LEVEL(&bp, 0); 3093 BP_SET_BIRTH(&bp, tx->tx_txg, 0); 3094 BP_SET_REDACTED(&bp); 3095 BPE_SET_LSIZE(&bp, dbuf->db_size); 3096 3097 dbuf_override_impl(db, &bp, tx); 3098 } 3099 3100 /* 3101 * Directly assign a provided arc buf to a given dbuf if it's not referenced 3102 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 3103 */ 3104 void 3105 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx, 3106 dmu_flags_t flags) 3107 { 3108 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 3109 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3110 ASSERT(db->db_level == 0); 3111 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 3112 ASSERT(buf != NULL); 3113 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 3114 ASSERT(tx->tx_txg != 0); 3115 3116 arc_return_buf(buf, db); 3117 ASSERT(arc_released(buf)); 3118 3119 mutex_enter(&db->db_mtx); 3120 if (!(flags & (DMU_UNCACHEDIO | DMU_KEEP_CACHING))) 3121 db->db_pending_evict = B_FALSE; 3122 db->db_partial_read = B_FALSE; 3123 3124 while (db->db_state == DB_READ || db->db_state == DB_FILL) 3125 cv_wait(&db->db_changed, &db->db_mtx); 3126 3127 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED || 3128 db->db_state == DB_NOFILL); 3129 3130 if (db->db_state == DB_CACHED && 3131 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 3132 /* 3133 * In practice, we will never have a case where we have an 3134 * encrypted arc buffer while additional holds exist on the 3135 * dbuf. We don't handle this here so we simply assert that 3136 * fact instead. 3137 */ 3138 ASSERT(!arc_is_encrypted(buf)); 3139 mutex_exit(&db->db_mtx); 3140 (void) dbuf_dirty(db, tx); 3141 memcpy(db->db.db_data, buf->b_data, db->db.db_size); 3142 arc_buf_destroy(buf, db); 3143 return; 3144 } 3145 3146 if (db->db_state == DB_CACHED) { 3147 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records); 3148 3149 ASSERT(db->db_buf != NULL); 3150 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 3151 ASSERT(dr->dt.dl.dr_data == db->db_buf); 3152 3153 if (!arc_released(db->db_buf)) { 3154 ASSERT(dr->dt.dl.dr_override_state == 3155 DR_OVERRIDDEN); 3156 arc_release(db->db_buf, db); 3157 } 3158 dr->dt.dl.dr_data = buf; 3159 arc_buf_destroy(db->db_buf, db); 3160 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 3161 arc_release(db->db_buf, db); 3162 arc_buf_destroy(db->db_buf, db); 3163 } 3164 db->db_buf = NULL; 3165 } else if (db->db_state == DB_NOFILL) { 3166 /* 3167 * We will be completely replacing the cloned block. In case 3168 * it was cloned in this transaction group, let's undirty the 3169 * pending clone and mark the block as uncached. This will be 3170 * as if the clone was never done. 
3171 */ 3172 VERIFY(!dbuf_undirty(db, tx)); 3173 db->db_state = DB_UNCACHED; 3174 } 3175 ASSERT(db->db_buf == NULL); 3176 dbuf_set_data(db, buf); 3177 db->db_state = DB_FILL; 3178 DTRACE_SET_STATE(db, "filling assigned arcbuf"); 3179 mutex_exit(&db->db_mtx); 3180 (void) dbuf_dirty(db, tx); 3181 dmu_buf_fill_done(&db->db, tx, B_FALSE); 3182 } 3183 3184 void 3185 dbuf_destroy(dmu_buf_impl_t *db) 3186 { 3187 dnode_t *dn; 3188 dmu_buf_impl_t *parent = db->db_parent; 3189 dmu_buf_impl_t *dndb; 3190 3191 ASSERT(MUTEX_HELD(&db->db_mtx)); 3192 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3193 3194 if (db->db_buf != NULL) { 3195 arc_buf_destroy(db->db_buf, db); 3196 db->db_buf = NULL; 3197 } 3198 3199 if (db->db_blkid == DMU_BONUS_BLKID) { 3200 int slots = DB_DNODE(db)->dn_num_slots; 3201 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 3202 if (db->db.db_data != NULL) { 3203 kmem_free(db->db.db_data, bonuslen); 3204 arc_space_return(bonuslen, ARC_SPACE_BONUS); 3205 db->db_state = DB_UNCACHED; 3206 DTRACE_SET_STATE(db, "buffer cleared"); 3207 } 3208 } 3209 3210 dbuf_clear_data(db); 3211 3212 if (multilist_link_active(&db->db_cache_link)) { 3213 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 3214 db->db_caching_status == DB_DBUF_METADATA_CACHE); 3215 3216 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 3217 3218 ASSERT0(dmu_buf_user_size(&db->db)); 3219 (void) zfs_refcount_remove_many( 3220 &dbuf_caches[db->db_caching_status].size, 3221 db->db.db_size, db); 3222 3223 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 3224 DBUF_STAT_BUMPDOWN(metadata_cache_count); 3225 } else { 3226 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 3227 DBUF_STAT_BUMPDOWN(cache_count); 3228 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 3229 db->db.db_size); 3230 } 3231 db->db_caching_status = DB_NO_CACHE; 3232 } 3233 3234 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 3235 ASSERT(db->db_data_pending == NULL); 3236 ASSERT(list_is_empty(&db->db_dirty_records)); 3237 3238 db->db_state = DB_EVICTING; 3239 DTRACE_SET_STATE(db, "buffer eviction started"); 3240 db->db_blkptr = NULL; 3241 3242 /* 3243 * Now that db_state is DB_EVICTING, nobody else can find this via 3244 * the hash table. We can now drop db_mtx, which allows us to 3245 * acquire the dn_dbufs_mtx. 3246 */ 3247 mutex_exit(&db->db_mtx); 3248 3249 DB_DNODE_ENTER(db); 3250 dn = DB_DNODE(db); 3251 dndb = dn->dn_dbuf; 3252 if (db->db_blkid != DMU_BONUS_BLKID) { 3253 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 3254 if (needlock) 3255 mutex_enter_nested(&dn->dn_dbufs_mtx, 3256 NESTED_SINGLE); 3257 avl_remove(&dn->dn_dbufs, db); 3258 membar_producer(); 3259 DB_DNODE_EXIT(db); 3260 if (needlock) 3261 mutex_exit(&dn->dn_dbufs_mtx); 3262 /* 3263 * Decrementing the dbuf count means that the hold corresponding 3264 * to the removed dbuf is no longer discounted in dnode_move(), 3265 * so the dnode cannot be moved until after we release the hold. 3266 * The membar_producer() ensures visibility of the decremented 3267 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 3268 * release any lock. 
3269 */ 3270 mutex_enter(&dn->dn_mtx); 3271 dnode_rele_and_unlock(dn, db, B_TRUE); 3272 #ifdef USE_DNODE_HANDLE 3273 db->db_dnode_handle = NULL; 3274 #else 3275 db->db_dnode = NULL; 3276 #endif 3277 3278 dbuf_hash_remove(db); 3279 } else { 3280 DB_DNODE_EXIT(db); 3281 } 3282 3283 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3284 3285 db->db_parent = NULL; 3286 3287 ASSERT(db->db_buf == NULL); 3288 ASSERT(db->db.db_data == NULL); 3289 ASSERT(db->db_hash_next == NULL); 3290 ASSERT(db->db_blkptr == NULL); 3291 ASSERT(db->db_data_pending == NULL); 3292 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 3293 ASSERT(!multilist_link_active(&db->db_cache_link)); 3294 3295 /* 3296 * If this dbuf is referenced from an indirect dbuf, 3297 * decrement the ref count on the indirect dbuf. 3298 */ 3299 if (parent && parent != dndb) { 3300 mutex_enter(&parent->db_mtx); 3301 dbuf_rele_and_unlock(parent, db, B_TRUE); 3302 } 3303 3304 kmem_cache_free(dbuf_kmem_cache, db); 3305 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3306 } 3307 3308 /* 3309 * Note: While bpp will always be updated if the function returns success, 3310 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 3311 * this happens when the dnode is the meta-dnode, or {user|group|project}used 3312 * object. 3313 */ 3314 __attribute__((always_inline)) 3315 static inline int 3316 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 3317 dmu_buf_impl_t **parentp, blkptr_t **bpp) 3318 { 3319 *parentp = NULL; 3320 *bpp = NULL; 3321 3322 ASSERT(blkid != DMU_BONUS_BLKID); 3323 3324 if (blkid == DMU_SPILL_BLKID) { 3325 mutex_enter(&dn->dn_mtx); 3326 if (dn->dn_have_spill && 3327 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 3328 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 3329 else 3330 *bpp = NULL; 3331 dbuf_add_ref(dn->dn_dbuf, NULL); 3332 *parentp = dn->dn_dbuf; 3333 mutex_exit(&dn->dn_mtx); 3334 return (0); 3335 } 3336 3337 int nlevels = 3338 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 3339 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 3340 3341 ASSERT3U(level * epbs, <, 64); 3342 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3343 /* 3344 * This assertion shouldn't trip as long as the max indirect block size 3345 * is less than 1M. The reason for this is that up to that point, 3346 * the number of levels required to address an entire object with blocks 3347 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 3348 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 3349 * (i.e. we can address the entire object), objects will all use at most 3350 * N-1 levels and the assertion won't overflow. However, once epbs is 3351 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 3352 * enough to address an entire object, so objects will have 5 levels, 3353 * but then this assertion will overflow. 3354 * 3355 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 3356 * need to redo this logic to handle overflows. 
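 *
 * As a concrete (illustrative) instance of the check below: with 128K
 * indirect blocks, epbs = 17 - SPA_BLKPTRSHIFT = 10, so checking a
 * 5-level object at level 0 with the maximum of 3 block pointers in the
 * dnode gives (5 - 0 - 1) * 10 + highbit64(3) = 42 <= 64.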
3357 */ 3358 ASSERT(level >= nlevels || 3359 ((nlevels - level - 1) * epbs) + 3360 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 3361 if (level >= nlevels || 3362 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 3363 ((nlevels - level - 1) * epbs)) || 3364 (fail_sparse && 3365 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 3366 /* the buffer has no parent yet */ 3367 return (SET_ERROR(ENOENT)); 3368 } else if (level < nlevels-1) { 3369 /* this block is referenced from an indirect block */ 3370 int err; 3371 3372 err = dbuf_hold_impl(dn, level + 1, 3373 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 3374 3375 if (err) 3376 return (err); 3377 err = dbuf_read(*parentp, NULL, DB_RF_CANFAIL | 3378 DB_RF_HAVESTRUCT | DMU_READ_NO_PREFETCH); 3379 if (err) { 3380 dbuf_rele(*parentp, NULL); 3381 *parentp = NULL; 3382 return (err); 3383 } 3384 rw_enter(&(*parentp)->db_rwlock, RW_READER); 3385 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 3386 (blkid & ((1ULL << epbs) - 1)); 3387 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 3388 ASSERT(BP_IS_HOLE(*bpp)); 3389 rw_exit(&(*parentp)->db_rwlock); 3390 return (0); 3391 } else { 3392 /* the block is referenced from the dnode */ 3393 ASSERT3U(level, ==, nlevels-1); 3394 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 3395 blkid < dn->dn_phys->dn_nblkptr); 3396 if (dn->dn_dbuf) { 3397 dbuf_add_ref(dn->dn_dbuf, NULL); 3398 *parentp = dn->dn_dbuf; 3399 } 3400 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 3401 return (0); 3402 } 3403 } 3404 3405 static dmu_buf_impl_t * 3406 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 3407 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash) 3408 { 3409 objset_t *os = dn->dn_objset; 3410 dmu_buf_impl_t *db, *odb; 3411 3412 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3413 ASSERT(dn->dn_type != DMU_OT_NONE); 3414 3415 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 3416 3417 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t), 3418 offsetof(dbuf_dirty_record_t, dr_dbuf_node)); 3419 3420 db->db_objset = os; 3421 db->db.db_object = dn->dn_object; 3422 db->db_level = level; 3423 db->db_blkid = blkid; 3424 db->db_dirtycnt = 0; 3425 #ifdef USE_DNODE_HANDLE 3426 db->db_dnode_handle = dn->dn_handle; 3427 #else 3428 db->db_dnode = dn; 3429 #endif 3430 db->db_parent = parent; 3431 db->db_blkptr = blkptr; 3432 db->db_hash = hash; 3433 3434 db->db_user = NULL; 3435 db->db_user_immediate_evict = FALSE; 3436 db->db_freed_in_flight = FALSE; 3437 db->db_pending_evict = TRUE; 3438 db->db_partial_read = FALSE; 3439 3440 if (blkid == DMU_BONUS_BLKID) { 3441 ASSERT3P(parent, ==, dn->dn_dbuf); 3442 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 3443 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 3444 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 3445 db->db.db_offset = DMU_BONUS_BLKID; 3446 db->db_state = DB_UNCACHED; 3447 DTRACE_SET_STATE(db, "bonus buffer created"); 3448 db->db_caching_status = DB_NO_CACHE; 3449 /* the bonus dbuf is not placed in the hash table */ 3450 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3451 return (db); 3452 } else if (blkid == DMU_SPILL_BLKID) { 3453 db->db.db_size = (blkptr != NULL) ? 3454 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 3455 db->db.db_offset = 0; 3456 } else { 3457 int blocksize = 3458 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 3459 db->db.db_size = blocksize; 3460 db->db.db_offset = db->db_blkid * blocksize; 3461 } 3462 3463 /* 3464 * Hold the dn_dbufs_mtx while we get the new dbuf 3465 * in the hash table *and* added to the dbufs list. 
3466 * This prevents a possible deadlock with someone 3467 * trying to look up this dbuf before it's added to the 3468 * dn_dbufs list. 3469 */ 3470 mutex_enter(&dn->dn_dbufs_mtx); 3471 db->db_state = DB_EVICTING; /* not worth logging this state change */ 3472 if ((odb = dbuf_hash_insert(db)) != NULL) { 3473 /* someone else inserted it first */ 3474 mutex_exit(&dn->dn_dbufs_mtx); 3475 kmem_cache_free(dbuf_kmem_cache, db); 3476 DBUF_STAT_BUMP(hash_insert_race); 3477 return (odb); 3478 } 3479 avl_add(&dn->dn_dbufs, db); 3480 3481 db->db_state = DB_UNCACHED; 3482 DTRACE_SET_STATE(db, "regular buffer created"); 3483 db->db_caching_status = DB_NO_CACHE; 3484 mutex_exit(&dn->dn_dbufs_mtx); 3485 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF); 3486 3487 if (parent && parent != dn->dn_dbuf) 3488 dbuf_add_ref(parent, db); 3489 3490 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 3491 zfs_refcount_count(&dn->dn_holds) > 0); 3492 (void) zfs_refcount_add(&dn->dn_holds, db); 3493 3494 dprintf_dbuf(db, "db=%p\n", db); 3495 3496 return (db); 3497 } 3498 3499 /* 3500 * This function returns a block pointer and information about the object, 3501 * given a dnode and a block. This is a publicly accessible version of 3502 * dbuf_findbp that only returns some information, rather than the 3503 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock 3504 * should be locked as (at least) a reader. 3505 */ 3506 int 3507 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid, 3508 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift) 3509 { 3510 dmu_buf_impl_t *dbp = NULL; 3511 blkptr_t *bp2; 3512 int err = 0; 3513 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3514 3515 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2); 3516 if (err == 0) { 3517 ASSERT3P(bp2, !=, NULL); 3518 *bp = *bp2; 3519 if (dbp != NULL) 3520 dbuf_rele(dbp, NULL); 3521 if (datablkszsec != NULL) 3522 *datablkszsec = dn->dn_phys->dn_datablkszsec; 3523 if (indblkshift != NULL) 3524 *indblkshift = dn->dn_phys->dn_indblkshift; 3525 } 3526 3527 return (err); 3528 } 3529 3530 typedef struct dbuf_prefetch_arg { 3531 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 3532 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 3533 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 3534 int dpa_curlevel; /* The current level that we're reading */ 3535 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 3536 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 3537 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 3538 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 3539 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */ 3540 void *dpa_arg; /* prefetch completion arg */ 3541 } dbuf_prefetch_arg_t; 3542 3543 static void 3544 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done) 3545 { 3546 if (dpa->dpa_cb != NULL) { 3547 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level, 3548 dpa->dpa_zb.zb_blkid, io_done); 3549 } 3550 kmem_free(dpa, sizeof (*dpa)); 3551 } 3552 3553 static void 3554 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb, 3555 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3556 { 3557 (void) zio, (void) zb, (void) iobp; 3558 dbuf_prefetch_arg_t *dpa = private; 3559 3560 if (abuf != NULL) 3561 arc_buf_destroy(abuf, private); 3562 3563 dbuf_prefetch_fini(dpa, B_TRUE); 3564 } 3565 3566 /* 3567 * Actually issue the prefetch read for the block given. 
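 *
 * The read is speculative and issued with ARC_FLAG_NO_BUF, so no
 * caller-visible buffer (and no dbuf) is created; on completion
 * dbuf_issue_final_prefetch_done() simply releases any buffer it was
 * handed and calls dbuf_prefetch_fini().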
3568 */ 3569 static void 3570 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 3571 { 3572 ASSERT(!BP_IS_HOLE(bp)); 3573 ASSERT(!BP_IS_REDACTED(bp)); 3574 if (BP_IS_EMBEDDED(bp)) 3575 return (dbuf_prefetch_fini(dpa, B_FALSE)); 3576 3577 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 3578 arc_flags_t aflags = 3579 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH | 3580 ARC_FLAG_NO_BUF; 3581 3582 /* dnodes are always read as raw and then converted later */ 3583 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 3584 dpa->dpa_curlevel == 0) 3585 zio_flags |= ZIO_FLAG_RAW; 3586 3587 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3588 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 3589 ASSERT(dpa->dpa_zio != NULL); 3590 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, 3591 dbuf_issue_final_prefetch_done, dpa, 3592 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 3593 } 3594 3595 /* 3596 * Called when an indirect block above our prefetch target is read in. This 3597 * will either read in the next indirect block down the tree or issue the actual 3598 * prefetch if the next block down is our target. 3599 */ 3600 static void 3601 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 3602 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 3603 { 3604 (void) zb, (void) iobp; 3605 dbuf_prefetch_arg_t *dpa = private; 3606 3607 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 3608 ASSERT3S(dpa->dpa_curlevel, >, 0); 3609 3610 if (abuf == NULL) { 3611 ASSERT(zio == NULL || zio->io_error != 0); 3612 dbuf_prefetch_fini(dpa, B_TRUE); 3613 return; 3614 } 3615 ASSERT(zio == NULL || zio->io_error == 0); 3616 3617 /* 3618 * The dpa_dnode is only valid if we are called with a NULL 3619 * zio. This indicates that the arc_read() returned without 3620 * first calling zio_read() to issue a physical read. Once 3621 * a physical read is made the dpa_dnode must be invalidated 3622 * as the locks guarding it may have been dropped. If the 3623 * dpa_dnode is still valid, then we want to add it to the dbuf 3624 * cache. To do so, we must hold the dbuf associated with the block 3625 * we just prefetched, read its contents so that we associate it 3626 * with an arc_buf_t, and then release it. 
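 * The dbuf_hold_level() below may fail, in which case we simply finish the
 * prefetch early; the dbuf_read() is issued with DB_RF_CANFAIL and its
 * result is intentionally ignored.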
3627 */ 3628 if (zio != NULL) { 3629 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 3630 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 3631 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 3632 } else { 3633 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 3634 } 3635 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 3636 3637 dpa->dpa_dnode = NULL; 3638 } else if (dpa->dpa_dnode != NULL) { 3639 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 3640 (dpa->dpa_epbs * (dpa->dpa_curlevel - 3641 dpa->dpa_zb.zb_level)); 3642 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 3643 dpa->dpa_curlevel, curblkid, FTAG); 3644 if (db == NULL) { 3645 arc_buf_destroy(abuf, private); 3646 dbuf_prefetch_fini(dpa, B_TRUE); 3647 return; 3648 } 3649 (void) dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT | 3650 DMU_READ_NO_PREFETCH); 3651 dbuf_rele(db, FTAG); 3652 } 3653 3654 dpa->dpa_curlevel--; 3655 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 3656 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 3657 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 3658 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 3659 3660 ASSERT(!BP_IS_REDACTED(bp) || dpa->dpa_dnode == NULL || 3661 dsl_dataset_feature_is_active( 3662 dpa->dpa_dnode->dn_objset->os_dsl_dataset, 3663 SPA_FEATURE_REDACTED_DATASETS)); 3664 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) { 3665 arc_buf_destroy(abuf, private); 3666 dbuf_prefetch_fini(dpa, B_TRUE); 3667 return; 3668 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 3669 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 3670 dbuf_issue_final_prefetch(dpa, bp); 3671 } else { 3672 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3673 zbookmark_phys_t zb; 3674 3675 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3676 if (dpa->dpa_dnode) { 3677 if (dnode_level_is_l2cacheable(bp, dpa->dpa_dnode, 3678 dpa->dpa_curlevel)) 3679 iter_aflags |= ARC_FLAG_L2CACHE; 3680 } else { 3681 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 3682 iter_aflags |= ARC_FLAG_L2CACHE; 3683 } 3684 3685 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 3686 3687 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 3688 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 3689 3690 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3691 bp, dbuf_prefetch_indirect_done, dpa, 3692 ZIO_PRIORITY_SYNC_READ, 3693 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3694 &iter_aflags, &zb); 3695 } 3696 3697 arc_buf_destroy(abuf, private); 3698 } 3699 3700 /* 3701 * Issue prefetch reads for the given block on the given level. If the indirect 3702 * blocks above that block are not in memory, we will read them in 3703 * asynchronously. As a result, this call never blocks waiting for a read to 3704 * complete. Note that the prefetch might fail if the dataset is encrypted and 3705 * the encryption key is unmapped before the IO completes. 3706 */ 3707 int 3708 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid, 3709 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb, 3710 void *arg) 3711 { 3712 blkptr_t bp; 3713 int epbs, nlevels, curlevel; 3714 uint64_t curblkid; 3715 3716 ASSERT(blkid != DMU_BONUS_BLKID); 3717 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3718 3719 if (blkid > dn->dn_maxblkid) 3720 goto no_issue; 3721 3722 if (level == 0 && dnode_block_freed(dn, blkid)) 3723 goto no_issue; 3724 3725 /* 3726 * This dnode hasn't been written to disk yet, so there's nothing to 3727 * prefetch. 
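 * (That is the case when the requested level is at or above the on-disk
 * dn_nlevels, when the on-disk dnode has no block pointers at all, or when
 * blkid, translated to a level-0 block id, lies beyond the on-disk
 * maxblkid.)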
3728 */ 3729 nlevels = dn->dn_phys->dn_nlevels; 3730 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 3731 goto no_issue; 3732 3733 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3734 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 3735 goto no_issue; 3736 3737 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 3738 level, blkid, NULL); 3739 if (db != NULL) { 3740 mutex_exit(&db->db_mtx); 3741 /* 3742 * This dbuf already exists. It is either CACHED, or 3743 * (we assume) about to be read or filled. 3744 */ 3745 goto no_issue; 3746 } 3747 3748 /* 3749 * Find the closest ancestor (indirect block) of the target block 3750 * that is present in the cache. In this indirect block, we will 3751 * find the bp that is at curlevel, curblkid. 3752 */ 3753 curlevel = level; 3754 curblkid = blkid; 3755 while (curlevel < nlevels - 1) { 3756 int parent_level = curlevel + 1; 3757 uint64_t parent_blkid = curblkid >> epbs; 3758 dmu_buf_impl_t *db; 3759 3760 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 3761 FALSE, TRUE, FTAG, &db) == 0) { 3762 blkptr_t *bpp = db->db_buf->b_data; 3763 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 3764 dbuf_rele(db, FTAG); 3765 break; 3766 } 3767 3768 curlevel = parent_level; 3769 curblkid = parent_blkid; 3770 } 3771 3772 if (curlevel == nlevels - 1) { 3773 /* No cached indirect blocks found. */ 3774 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 3775 bp = dn->dn_phys->dn_blkptr[curblkid]; 3776 } 3777 ASSERT(!BP_IS_REDACTED(&bp) || 3778 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset, 3779 SPA_FEATURE_REDACTED_DATASETS)); 3780 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp)) 3781 goto no_issue; 3782 3783 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 3784 3785 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 3786 ZIO_FLAG_CANFAIL); 3787 3788 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 3789 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 3790 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 3791 dn->dn_object, level, blkid); 3792 dpa->dpa_curlevel = curlevel; 3793 dpa->dpa_prio = prio; 3794 dpa->dpa_aflags = aflags; 3795 dpa->dpa_spa = dn->dn_objset->os_spa; 3796 dpa->dpa_dnode = dn; 3797 dpa->dpa_epbs = epbs; 3798 dpa->dpa_zio = pio; 3799 dpa->dpa_cb = cb; 3800 dpa->dpa_arg = arg; 3801 3802 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level)) 3803 dpa->dpa_aflags |= ARC_FLAG_UNCACHED; 3804 else if (dnode_level_is_l2cacheable(&bp, dn, level)) 3805 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 3806 3807 /* 3808 * If we have the indirect just above us, no need to do the asynchronous 3809 * prefetch chain; we'll just run the last step ourselves. If we're at 3810 * a higher level, though, we want to issue the prefetches for all the 3811 * indirect blocks asynchronously, so we can go on with whatever we were 3812 * doing. 3813 */ 3814 if (curlevel == level) { 3815 ASSERT3U(curblkid, ==, blkid); 3816 dbuf_issue_final_prefetch(dpa, &bp); 3817 } else { 3818 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 3819 zbookmark_phys_t zb; 3820 3821 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 3822 if (dnode_level_is_l2cacheable(&bp, dn, curlevel)) 3823 iter_aflags |= ARC_FLAG_L2CACHE; 3824 3825 SET_BOOKMARK(&zb, ds != NULL ? 
ds->ds_object : DMU_META_OBJSET, 3826 dn->dn_object, curlevel, curblkid); 3827 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 3828 &bp, dbuf_prefetch_indirect_done, dpa, 3829 ZIO_PRIORITY_SYNC_READ, 3830 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 3831 &iter_aflags, &zb); 3832 } 3833 /* 3834 * We use pio here instead of dpa_zio since it's possible that 3835 * dpa may have already been freed. 3836 */ 3837 zio_nowait(pio); 3838 return (1); 3839 no_issue: 3840 if (cb != NULL) 3841 cb(arg, level, blkid, B_FALSE); 3842 return (0); 3843 } 3844 3845 int 3846 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 3847 arc_flags_t aflags) 3848 { 3849 3850 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL)); 3851 } 3852 3853 /* 3854 * Helper function for dbuf_hold_impl() to copy a buffer. Handles 3855 * the case of encrypted, compressed and uncompressed buffers by 3856 * allocating the new buffer, respectively, with arc_alloc_raw_buf(), 3857 * arc_alloc_compressed_buf() or arc_alloc_buf().* 3858 * 3859 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl(). 3860 */ 3861 noinline static void 3862 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db) 3863 { 3864 dbuf_dirty_record_t *dr = db->db_data_pending; 3865 arc_buf_t *data = dr->dt.dl.dr_data; 3866 arc_buf_t *db_data; 3867 enum zio_compress compress_type = arc_get_compression(data); 3868 uint8_t complevel = arc_get_complevel(data); 3869 3870 if (arc_is_encrypted(data)) { 3871 boolean_t byteorder; 3872 uint8_t salt[ZIO_DATA_SALT_LEN]; 3873 uint8_t iv[ZIO_DATA_IV_LEN]; 3874 uint8_t mac[ZIO_DATA_MAC_LEN]; 3875 3876 arc_get_raw_params(data, &byteorder, salt, iv, mac); 3877 db_data = arc_alloc_raw_buf(dn->dn_objset->os_spa, db, 3878 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, 3879 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), 3880 compress_type, complevel); 3881 } else if (compress_type != ZIO_COMPRESS_OFF) { 3882 db_data = arc_alloc_compressed_buf( 3883 dn->dn_objset->os_spa, db, arc_buf_size(data), 3884 arc_buf_lsize(data), compress_type, complevel); 3885 } else { 3886 db_data = arc_alloc_buf(dn->dn_objset->os_spa, db, 3887 DBUF_GET_BUFC_TYPE(db), db->db.db_size); 3888 } 3889 memcpy(db_data->b_data, data->b_data, arc_buf_size(data)); 3890 3891 dbuf_set_data(db, db_data); 3892 } 3893 3894 /* 3895 * Returns with db_holds incremented, and db_mtx not held. 3896 * Note: dn_struct_rwlock must be held. 
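 * If fail_sparse is set, ENOENT is returned rather than instantiating a
 * dbuf over a hole (or past the allocated range); if fail_uncached is set,
 * ENOENT is returned unless the dbuf already exists and is DB_CACHED.
 * On success *dbp is set to the held dbuf.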
3897 */ 3898 int 3899 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 3900 boolean_t fail_sparse, boolean_t fail_uncached, 3901 const void *tag, dmu_buf_impl_t **dbp) 3902 { 3903 dmu_buf_impl_t *db, *parent = NULL; 3904 uint64_t hv; 3905 3906 /* If the pool has been created, verify the tx_sync_lock is not held */ 3907 spa_t *spa = dn->dn_objset->os_spa; 3908 dsl_pool_t *dp = spa->spa_dsl_pool; 3909 if (dp != NULL) { 3910 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock)); 3911 } 3912 3913 ASSERT(blkid != DMU_BONUS_BLKID); 3914 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 3915 ASSERT3U(dn->dn_nlevels, >, level); 3916 3917 *dbp = NULL; 3918 3919 /* dbuf_find() returns with db_mtx held */ 3920 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv); 3921 3922 if (db == NULL) { 3923 blkptr_t *bp = NULL; 3924 int err; 3925 3926 if (fail_uncached) 3927 return (SET_ERROR(ENOENT)); 3928 3929 ASSERT3P(parent, ==, NULL); 3930 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 3931 if (fail_sparse) { 3932 if (err == 0 && bp && BP_IS_HOLE(bp)) 3933 err = SET_ERROR(ENOENT); 3934 if (err) { 3935 if (parent) 3936 dbuf_rele(parent, NULL); 3937 return (err); 3938 } 3939 } 3940 if (err && err != ENOENT) 3941 return (err); 3942 db = dbuf_create(dn, level, blkid, parent, bp, hv); 3943 } 3944 3945 if (fail_uncached && db->db_state != DB_CACHED) { 3946 mutex_exit(&db->db_mtx); 3947 return (SET_ERROR(ENOENT)); 3948 } 3949 3950 if (db->db_buf != NULL) { 3951 arc_buf_access(db->db_buf); 3952 ASSERT(MUTEX_HELD(&db->db_mtx)); 3953 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 3954 } 3955 3956 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 3957 3958 /* 3959 * If this buffer is currently syncing out, and we are 3960 * still referencing it from db_data, we need to make a copy 3961 * of it in case we decide we want to dirty it again in this txg. 
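 * The copy is made by dbuf_hold_copy(), which allocates the replacement
 * ARC buffer in the same form as the original (raw/encrypted, compressed,
 * or plain) before copying the data.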
3962 */ 3963 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 3964 dn->dn_object != DMU_META_DNODE_OBJECT && 3965 db->db_state == DB_CACHED && db->db_data_pending) { 3966 dbuf_dirty_record_t *dr = db->db_data_pending; 3967 if (dr->dt.dl.dr_data == db->db_buf) { 3968 ASSERT3P(db->db_buf, !=, NULL); 3969 dbuf_hold_copy(dn, db); 3970 } 3971 } 3972 3973 if (multilist_link_active(&db->db_cache_link)) { 3974 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 3975 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 3976 db->db_caching_status == DB_DBUF_METADATA_CACHE); 3977 3978 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db); 3979 3980 uint64_t size = db->db.db_size; 3981 uint64_t usize = dmu_buf_user_size(&db->db); 3982 (void) zfs_refcount_remove_many( 3983 &dbuf_caches[db->db_caching_status].size, size, db); 3984 (void) zfs_refcount_remove_many( 3985 &dbuf_caches[db->db_caching_status].size, usize, 3986 db->db_user); 3987 3988 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) { 3989 DBUF_STAT_BUMPDOWN(metadata_cache_count); 3990 } else { 3991 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]); 3992 DBUF_STAT_BUMPDOWN(cache_count); 3993 DBUF_STAT_DECR(cache_levels_bytes[db->db_level], 3994 size + usize); 3995 } 3996 db->db_caching_status = DB_NO_CACHE; 3997 } 3998 (void) zfs_refcount_add(&db->db_holds, tag); 3999 DBUF_VERIFY(db); 4000 mutex_exit(&db->db_mtx); 4001 4002 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 4003 if (parent) 4004 dbuf_rele(parent, NULL); 4005 4006 ASSERT3P(DB_DNODE(db), ==, dn); 4007 ASSERT3U(db->db_blkid, ==, blkid); 4008 ASSERT3U(db->db_level, ==, level); 4009 *dbp = db; 4010 4011 return (0); 4012 } 4013 4014 dmu_buf_impl_t * 4015 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag) 4016 { 4017 return (dbuf_hold_level(dn, 0, blkid, tag)); 4018 } 4019 4020 dmu_buf_impl_t * 4021 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag) 4022 { 4023 dmu_buf_impl_t *db; 4024 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 4025 return (err ? 
NULL : db); 4026 } 4027 4028 void 4029 dbuf_create_bonus(dnode_t *dn) 4030 { 4031 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 4032 4033 ASSERT(dn->dn_bonus == NULL); 4034 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL, 4035 dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID)); 4036 dn->dn_bonus->db_pending_evict = FALSE; 4037 } 4038 4039 int 4040 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 4041 { 4042 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4043 4044 if (db->db_blkid != DMU_SPILL_BLKID) 4045 return (SET_ERROR(ENOTSUP)); 4046 if (blksz == 0) 4047 blksz = SPA_MINBLOCKSIZE; 4048 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 4049 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 4050 4051 dbuf_new_size(db, blksz, tx); 4052 4053 return (0); 4054 } 4055 4056 void 4057 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 4058 { 4059 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 4060 } 4061 4062 #pragma weak dmu_buf_add_ref = dbuf_add_ref 4063 void 4064 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag) 4065 { 4066 int64_t holds = zfs_refcount_add(&db->db_holds, tag); 4067 VERIFY3S(holds, >, 1); 4068 } 4069 4070 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 4071 boolean_t 4072 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 4073 const void *tag) 4074 { 4075 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4076 dmu_buf_impl_t *found_db; 4077 boolean_t result = B_FALSE; 4078 4079 if (blkid == DMU_BONUS_BLKID) 4080 found_db = dbuf_find_bonus(os, obj); 4081 else 4082 found_db = dbuf_find(os, obj, 0, blkid, NULL); 4083 4084 if (found_db != NULL) { 4085 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 4086 (void) zfs_refcount_add(&db->db_holds, tag); 4087 result = B_TRUE; 4088 } 4089 mutex_exit(&found_db->db_mtx); 4090 } 4091 return (result); 4092 } 4093 4094 /* 4095 * If you call dbuf_rele() you had better not be referencing the dnode handle 4096 * unless you have some other direct or indirect hold on the dnode. (An indirect 4097 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 4098 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 4099 * dnode's parent dbuf evicting its dnode handles. 4100 */ 4101 void 4102 dbuf_rele(dmu_buf_impl_t *db, const void *tag) 4103 { 4104 mutex_enter(&db->db_mtx); 4105 dbuf_rele_and_unlock(db, tag, B_FALSE); 4106 } 4107 4108 void 4109 dmu_buf_rele(dmu_buf_t *db, const void *tag) 4110 { 4111 dbuf_rele((dmu_buf_impl_t *)db, tag); 4112 } 4113 4114 /* 4115 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 4116 * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 4117 * argument should be set if we are already in the dbuf-evicting code 4118 * path, in which case we don't want to recursively evict. This allows us to 4119 * avoid deeply nested stacks that would have a call flow similar to this: 4120 * 4121 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 4122 * ^ | 4123 * | | 4124 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 4125 * 4126 */ 4127 void 4128 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting) 4129 { 4130 int64_t holds; 4131 uint64_t size; 4132 4133 ASSERT(MUTEX_HELD(&db->db_mtx)); 4134 DBUF_VERIFY(db); 4135 4136 /* 4137 * Remove the reference to the dbuf before removing its hold on the 4138 * dnode so we can guarantee in dnode_move() that a referenced bonus 4139 * buffer has a corresponding dnode hold. 
4140 */ 4141 holds = zfs_refcount_remove(&db->db_holds, tag); 4142 ASSERT(holds >= 0); 4143 4144 /* 4145 * We can't freeze indirects if there is a possibility that they 4146 * may be modified in the current syncing context. 4147 */ 4148 if (db->db_buf != NULL && 4149 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 4150 arc_buf_freeze(db->db_buf); 4151 } 4152 4153 if (holds == db->db_dirtycnt && 4154 db->db_level == 0 && db->db_user_immediate_evict) 4155 dbuf_evict_user(db); 4156 4157 if (holds == 0) { 4158 if (db->db_blkid == DMU_BONUS_BLKID) { 4159 dnode_t *dn; 4160 boolean_t evict_dbuf = db->db_pending_evict; 4161 4162 /* 4163 * If the dnode moves here, we cannot cross this 4164 * barrier until the move completes. 4165 */ 4166 DB_DNODE_ENTER(db); 4167 4168 dn = DB_DNODE(db); 4169 atomic_dec_32(&dn->dn_dbufs_count); 4170 4171 /* 4172 * Decrementing the dbuf count means that the bonus 4173 * buffer's dnode hold is no longer discounted in 4174 * dnode_move(). The dnode cannot move until after 4175 * the dnode_rele() below. 4176 */ 4177 DB_DNODE_EXIT(db); 4178 4179 /* 4180 * Do not reference db after its lock is dropped. 4181 * Another thread may evict it. 4182 */ 4183 mutex_exit(&db->db_mtx); 4184 4185 if (evict_dbuf) 4186 dnode_evict_bonus(dn); 4187 4188 dnode_rele(dn, db); 4189 } else if (db->db_buf == NULL) { 4190 /* 4191 * This is a special case: we never associated this 4192 * dbuf with any data allocated from the ARC. 4193 */ 4194 ASSERT(db->db_state == DB_UNCACHED || 4195 db->db_state == DB_NOFILL); 4196 dbuf_destroy(db); 4197 } else if (arc_released(db->db_buf)) { 4198 /* 4199 * This dbuf has anonymous data associated with it. 4200 */ 4201 dbuf_destroy(db); 4202 } else if (!db->db_partial_read && !DBUF_IS_CACHEABLE(db)) { 4203 /* 4204 * We don't expect more accesses to the dbuf, and it 4205 * is either not cacheable or was marked for eviction. 4206 */ 4207 dbuf_destroy(db); 4208 } else if (!multilist_link_active(&db->db_cache_link)) { 4209 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 4210 4211 dbuf_cached_state_t dcs = 4212 dbuf_include_in_metadata_cache(db) ? 
4213 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 4214 db->db_caching_status = dcs; 4215 4216 multilist_insert(&dbuf_caches[dcs].cache, db); 4217 uint64_t db_size = db->db.db_size; 4218 uint64_t dbu_size = dmu_buf_user_size(&db->db); 4219 (void) zfs_refcount_add_many( 4220 &dbuf_caches[dcs].size, db_size, db); 4221 size = zfs_refcount_add_many( 4222 &dbuf_caches[dcs].size, dbu_size, db->db_user); 4223 uint8_t db_level = db->db_level; 4224 mutex_exit(&db->db_mtx); 4225 4226 if (dcs == DB_DBUF_METADATA_CACHE) { 4227 DBUF_STAT_BUMP(metadata_cache_count); 4228 DBUF_STAT_MAX(metadata_cache_size_bytes_max, 4229 size); 4230 } else { 4231 DBUF_STAT_BUMP(cache_count); 4232 DBUF_STAT_MAX(cache_size_bytes_max, size); 4233 DBUF_STAT_BUMP(cache_levels[db_level]); 4234 DBUF_STAT_INCR(cache_levels_bytes[db_level], 4235 db_size + dbu_size); 4236 } 4237 4238 if (dcs == DB_DBUF_CACHE && !evicting) 4239 dbuf_evict_notify(size); 4240 } 4241 } else { 4242 mutex_exit(&db->db_mtx); 4243 } 4244 } 4245 4246 #pragma weak dmu_buf_refcount = dbuf_refcount 4247 uint64_t 4248 dbuf_refcount(dmu_buf_impl_t *db) 4249 { 4250 return (zfs_refcount_count(&db->db_holds)); 4251 } 4252 4253 uint64_t 4254 dmu_buf_user_refcount(dmu_buf_t *db_fake) 4255 { 4256 uint64_t holds; 4257 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4258 4259 mutex_enter(&db->db_mtx); 4260 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); 4261 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; 4262 mutex_exit(&db->db_mtx); 4263 4264 return (holds); 4265 } 4266 4267 void * 4268 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 4269 dmu_buf_user_t *new_user) 4270 { 4271 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4272 4273 mutex_enter(&db->db_mtx); 4274 dbuf_verify_user(db, DBVU_NOT_EVICTING); 4275 if (db->db_user == old_user) 4276 db->db_user = new_user; 4277 else 4278 old_user = db->db_user; 4279 dbuf_verify_user(db, DBVU_NOT_EVICTING); 4280 mutex_exit(&db->db_mtx); 4281 4282 return (old_user); 4283 } 4284 4285 void * 4286 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 4287 { 4288 return (dmu_buf_replace_user(db_fake, NULL, user)); 4289 } 4290 4291 void * 4292 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 4293 { 4294 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4295 4296 db->db_user_immediate_evict = TRUE; 4297 return (dmu_buf_set_user(db_fake, user)); 4298 } 4299 4300 void * 4301 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 4302 { 4303 return (dmu_buf_replace_user(db_fake, user, NULL)); 4304 } 4305 4306 void * 4307 dmu_buf_get_user(dmu_buf_t *db_fake) 4308 { 4309 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4310 4311 dbuf_verify_user(db, DBVU_NOT_EVICTING); 4312 return (db->db_user); 4313 } 4314 4315 uint64_t 4316 dmu_buf_user_size(dmu_buf_t *db_fake) 4317 { 4318 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4319 if (db->db_user == NULL) 4320 return (0); 4321 return (atomic_load_64(&db->db_user->dbu_size)); 4322 } 4323 4324 void 4325 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd) 4326 { 4327 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4328 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 4329 ASSERT3P(db->db_user, !=, NULL); 4330 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd); 4331 atomic_add_64(&db->db_user->dbu_size, nadd); 4332 } 4333 4334 void 4335 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub) 4336 { 4337 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 4338 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 
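/*
 * Like dmu_buf_add_user_size() above, this may only be called while the
 * dbuf is uncached: the user size is folded into the dbuf cache size
 * accounting when the dbuf is inserted into a cache (see
 * dbuf_rele_and_unlock()), so changing it here would skew that accounting.
 */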
4339 ASSERT3P(db->db_user, !=, NULL); 4340 ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub); 4341 atomic_sub_64(&db->db_user->dbu_size, nsub); 4342 } 4343 4344 void 4345 dmu_buf_user_evict_wait(void) 4346 { 4347 taskq_wait(dbu_evict_taskq); 4348 } 4349 4350 blkptr_t * 4351 dmu_buf_get_blkptr(dmu_buf_t *db) 4352 { 4353 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4354 return (dbi->db_blkptr); 4355 } 4356 4357 objset_t * 4358 dmu_buf_get_objset(dmu_buf_t *db) 4359 { 4360 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 4361 return (dbi->db_objset); 4362 } 4363 4364 static void 4365 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 4366 { 4367 /* ASSERT(dmu_tx_is_syncing(tx) */ 4368 ASSERT(MUTEX_HELD(&db->db_mtx)); 4369 4370 if (db->db_blkptr != NULL) 4371 return; 4372 4373 if (db->db_blkid == DMU_SPILL_BLKID) { 4374 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 4375 BP_ZERO(db->db_blkptr); 4376 return; 4377 } 4378 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 4379 /* 4380 * This buffer was allocated at a time when there was 4381 * no available blkptrs from the dnode, or it was 4382 * inappropriate to hook it in (i.e., nlevels mismatch). 4383 */ 4384 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 4385 ASSERT(db->db_parent == NULL); 4386 db->db_parent = dn->dn_dbuf; 4387 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 4388 DBUF_VERIFY(db); 4389 } else { 4390 dmu_buf_impl_t *parent = db->db_parent; 4391 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 4392 4393 ASSERT(dn->dn_phys->dn_nlevels > 1); 4394 if (parent == NULL) { 4395 mutex_exit(&db->db_mtx); 4396 rw_enter(&dn->dn_struct_rwlock, RW_READER); 4397 parent = dbuf_hold_level(dn, db->db_level + 1, 4398 db->db_blkid >> epbs, db); 4399 rw_exit(&dn->dn_struct_rwlock); 4400 mutex_enter(&db->db_mtx); 4401 db->db_parent = parent; 4402 } 4403 db->db_blkptr = (blkptr_t *)parent->db.db_data + 4404 (db->db_blkid & ((1ULL << epbs) - 1)); 4405 DBUF_VERIFY(db); 4406 } 4407 } 4408 4409 static void 4410 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4411 { 4412 dmu_buf_impl_t *db = dr->dr_dbuf; 4413 void *data = dr->dt.dl.dr_data; 4414 4415 ASSERT0(db->db_level); 4416 ASSERT(MUTEX_HELD(&db->db_mtx)); 4417 ASSERT(db->db_blkid == DMU_BONUS_BLKID); 4418 ASSERT(data != NULL); 4419 4420 dnode_t *dn = dr->dr_dnode; 4421 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 4422 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 4423 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys)); 4424 4425 dbuf_sync_leaf_verify_bonus_dnode(dr); 4426 4427 dbuf_undirty_bonus(dr); 4428 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 4429 } 4430 4431 /* 4432 * When syncing out a blocks of dnodes, adjust the block to deal with 4433 * encryption. Normally, we make sure the block is decrypted before writing 4434 * it. If we have crypt params, then we are writing a raw (encrypted) block, 4435 * from a raw receive. In this case, set the ARC buf's crypt params so 4436 * that the BP will be filled with the correct byteorder, salt, iv, and mac. 
4437 */ 4438 static void 4439 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr) 4440 { 4441 int err; 4442 dmu_buf_impl_t *db = dr->dr_dbuf; 4443 4444 ASSERT(MUTEX_HELD(&db->db_mtx)); 4445 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 4446 ASSERT3U(db->db_level, ==, 0); 4447 4448 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { 4449 zbookmark_phys_t zb; 4450 4451 /* 4452 * Unfortunately, there is currently no mechanism for 4453 * syncing context to handle decryption errors. An error 4454 * here is only possible if an attacker maliciously 4455 * changed a dnode block and updated the associated 4456 * checksums going up the block tree. 4457 */ 4458 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 4459 db->db.db_object, db->db_level, db->db_blkid); 4460 err = arc_untransform(db->db_buf, db->db_objset->os_spa, 4461 &zb, B_TRUE); 4462 if (err) 4463 panic("Invalid dnode block MAC"); 4464 } else if (dr->dt.dl.dr_has_raw_params) { 4465 (void) arc_release(dr->dt.dl.dr_data, db); 4466 arc_convert_to_raw(dr->dt.dl.dr_data, 4467 dmu_objset_id(db->db_objset), 4468 dr->dt.dl.dr_byteorder, DMU_OT_DNODE, 4469 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac); 4470 } 4471 } 4472 4473 /* 4474 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it 4475 * is critical the we not allow the compiler to inline this function in to 4476 * dbuf_sync_list() thereby drastically bloating the stack usage. 4477 */ 4478 noinline static void 4479 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4480 { 4481 dmu_buf_impl_t *db = dr->dr_dbuf; 4482 dnode_t *dn = dr->dr_dnode; 4483 4484 ASSERT(dmu_tx_is_syncing(tx)); 4485 4486 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 4487 4488 mutex_enter(&db->db_mtx); 4489 4490 ASSERT(db->db_level > 0); 4491 DBUF_VERIFY(db); 4492 4493 /* Read the block if it hasn't been read yet. */ 4494 if (db->db_buf == NULL) { 4495 mutex_exit(&db->db_mtx); 4496 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 4497 mutex_enter(&db->db_mtx); 4498 } 4499 ASSERT3U(db->db_state, ==, DB_CACHED); 4500 ASSERT(db->db_buf != NULL); 4501 4502 /* Indirect block size must match what the dnode thinks it is. */ 4503 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 4504 dbuf_check_blkptr(dn, db); 4505 4506 /* Provide the pending dirty record to child dbufs */ 4507 db->db_data_pending = dr; 4508 4509 mutex_exit(&db->db_mtx); 4510 4511 dbuf_write(dr, db->db_buf, tx); 4512 4513 zio_t *zio = dr->dr_zio; 4514 mutex_enter(&dr->dt.di.dr_mtx); 4515 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 4516 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 4517 mutex_exit(&dr->dt.di.dr_mtx); 4518 zio_nowait(zio); 4519 } 4520 4521 /* 4522 * Verify that the size of the data in our bonus buffer does not exceed 4523 * its recorded size. 4524 * 4525 * The purpose of this verification is to catch any cases in development 4526 * where the size of a phys structure (i.e space_map_phys_t) grows and, 4527 * due to incorrect feature management, older pools expect to read more 4528 * data even though they didn't actually write it to begin with. 4529 * 4530 * For a example, this would catch an error in the feature logic where we 4531 * open an older pool and we expect to write the space map histogram of 4532 * a space map with size SPACE_MAP_SIZE_V0. 
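 * The check itself is simple: every byte between the dnode's dn_bonuslen
 * and the maximum bonus length for its slot count must still be zero.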
4533 */ 4534 static void 4535 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr) 4536 { 4537 #ifdef ZFS_DEBUG 4538 dnode_t *dn = dr->dr_dnode; 4539 4540 /* 4541 * Encrypted bonus buffers can have data past their bonuslen. 4542 * Skip the verification of these blocks. 4543 */ 4544 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)) 4545 return; 4546 4547 uint16_t bonuslen = dn->dn_phys->dn_bonuslen; 4548 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 4549 ASSERT3U(bonuslen, <=, maxbonuslen); 4550 4551 arc_buf_t *datap = dr->dt.dl.dr_data; 4552 char *datap_end = ((char *)datap) + bonuslen; 4553 char *datap_max = ((char *)datap) + maxbonuslen; 4554 4555 /* ensure that everything is zero after our data */ 4556 for (; datap_end < datap_max; datap_end++) 4557 ASSERT(*datap_end == 0); 4558 #endif 4559 } 4560 4561 static blkptr_t * 4562 dbuf_lightweight_bp(dbuf_dirty_record_t *dr) 4563 { 4564 /* This must be a lightweight dirty record. */ 4565 ASSERT3P(dr->dr_dbuf, ==, NULL); 4566 dnode_t *dn = dr->dr_dnode; 4567 4568 if (dn->dn_phys->dn_nlevels == 1) { 4569 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr); 4570 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]); 4571 } else { 4572 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf; 4573 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 4574 VERIFY3U(parent_db->db_level, ==, 1); 4575 VERIFY3P(DB_DNODE(parent_db), ==, dn); 4576 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid); 4577 blkptr_t *bp = parent_db->db.db_data; 4578 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]); 4579 } 4580 } 4581 4582 static void 4583 dbuf_lightweight_ready(zio_t *zio) 4584 { 4585 dbuf_dirty_record_t *dr = zio->io_private; 4586 blkptr_t *bp = zio->io_bp; 4587 4588 if (zio->io_error != 0) 4589 return; 4590 4591 dnode_t *dn = dr->dr_dnode; 4592 4593 blkptr_t *bp_orig = dbuf_lightweight_bp(dr); 4594 spa_t *spa = dmu_objset_spa(dn->dn_objset); 4595 int64_t delta = bp_get_dsize_sync(spa, bp) - 4596 bp_get_dsize_sync(spa, bp_orig); 4597 dnode_diduse_space(dn, delta); 4598 4599 uint64_t blkid = dr->dt.dll.dr_blkid; 4600 mutex_enter(&dn->dn_mtx); 4601 if (blkid > dn->dn_phys->dn_maxblkid) { 4602 ASSERT0(dn->dn_objset->os_raw_receive); 4603 dn->dn_phys->dn_maxblkid = blkid; 4604 } 4605 mutex_exit(&dn->dn_mtx); 4606 4607 if (!BP_IS_EMBEDDED(bp)) { 4608 uint64_t fill = BP_IS_HOLE(bp) ? 
0 : 1; 4609 BP_SET_FILL(bp, fill); 4610 } 4611 4612 dmu_buf_impl_t *parent_db; 4613 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1); 4614 if (dr->dr_parent == NULL) { 4615 parent_db = dn->dn_dbuf; 4616 } else { 4617 parent_db = dr->dr_parent->dr_dbuf; 4618 } 4619 rw_enter(&parent_db->db_rwlock, RW_WRITER); 4620 *bp_orig = *bp; 4621 rw_exit(&parent_db->db_rwlock); 4622 } 4623 4624 static void 4625 dbuf_lightweight_done(zio_t *zio) 4626 { 4627 dbuf_dirty_record_t *dr = zio->io_private; 4628 4629 VERIFY0(zio->io_error); 4630 4631 objset_t *os = dr->dr_dnode->dn_objset; 4632 dmu_tx_t *tx = os->os_synctx; 4633 4634 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 4635 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); 4636 } else { 4637 dsl_dataset_t *ds = os->os_dsl_dataset; 4638 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE); 4639 dsl_dataset_block_born(ds, zio->io_bp, tx); 4640 } 4641 4642 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, 4643 zio->io_txg); 4644 4645 abd_free(dr->dt.dll.dr_abd); 4646 kmem_free(dr, sizeof (*dr)); 4647 } 4648 4649 noinline static void 4650 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4651 { 4652 dnode_t *dn = dr->dr_dnode; 4653 zio_t *pio; 4654 if (dn->dn_phys->dn_nlevels == 1) { 4655 pio = dn->dn_zio; 4656 } else { 4657 pio = dr->dr_parent->dr_zio; 4658 } 4659 4660 zbookmark_phys_t zb = { 4661 .zb_objset = dmu_objset_id(dn->dn_objset), 4662 .zb_object = dn->dn_object, 4663 .zb_level = 0, 4664 .zb_blkid = dr->dt.dll.dr_blkid, 4665 }; 4666 4667 /* 4668 * See comment in dbuf_write(). This is so that zio->io_bp_orig 4669 * will have the old BP in dbuf_lightweight_done(). 4670 */ 4671 dr->dr_bp_copy = *dbuf_lightweight_bp(dr); 4672 4673 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset), 4674 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd, 4675 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd), 4676 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL, 4677 dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE, 4678 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb); 4679 4680 zio_nowait(dr->dr_zio); 4681 } 4682 4683 /* 4684 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is 4685 * critical the we not allow the compiler to inline this function in to 4686 * dbuf_sync_list() thereby drastically bloating the stack usage. 4687 */ 4688 noinline static void 4689 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 4690 { 4691 arc_buf_t **datap = &dr->dt.dl.dr_data; 4692 dmu_buf_impl_t *db = dr->dr_dbuf; 4693 dnode_t *dn = dr->dr_dnode; 4694 objset_t *os; 4695 uint64_t txg = tx->tx_txg; 4696 4697 ASSERT(dmu_tx_is_syncing(tx)); 4698 4699 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 4700 4701 mutex_enter(&db->db_mtx); 4702 /* 4703 * To be synced, we must be dirtied. But we might have been freed 4704 * after the dirty. 4705 */ 4706 if (db->db_state == DB_UNCACHED) { 4707 /* This buffer has been freed since it was dirtied */ 4708 ASSERT3P(db->db.db_data, ==, NULL); 4709 } else if (db->db_state == DB_FILL) { 4710 /* This buffer was freed and is now being re-filled */ 4711 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 4712 } else if (db->db_state == DB_READ) { 4713 /* 4714 * This buffer was either cloned or had a Direct I/O write 4715 * occur and has an in-flgiht read on the BP. It is safe to 4716 * issue the write here, because the read has already been 4717 * issued and the contents won't change. 
4718 * 4719 * We can verify the case of both the clone and Direct I/O 4720 * write by making sure the first dirty record for the dbuf 4721 * has no ARC buffer associated with it. 4722 */ 4723 dbuf_dirty_record_t *dr_head = 4724 list_head(&db->db_dirty_records); 4725 ASSERT3P(db->db_buf, ==, NULL); 4726 ASSERT3P(db->db.db_data, ==, NULL); 4727 ASSERT3P(dr_head->dt.dl.dr_data, ==, NULL); 4728 ASSERT3U(dr_head->dt.dl.dr_override_state, ==, DR_OVERRIDDEN); 4729 } else { 4730 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 4731 } 4732 DBUF_VERIFY(db); 4733 4734 if (db->db_blkid == DMU_SPILL_BLKID) { 4735 mutex_enter(&dn->dn_mtx); 4736 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) { 4737 /* 4738 * In the previous transaction group, the bonus buffer 4739 * was entirely used to store the attributes for the 4740 * dnode which overrode the dn_spill field. However, 4741 * when adding more attributes to the file a spill 4742 * block was required to hold the extra attributes. 4743 * 4744 * Make sure to clear the garbage left in the dn_spill 4745 * field from the previous attributes in the bonus 4746 * buffer. Otherwise, after writing out the spill 4747 * block to the new allocated dva, it will free 4748 * the old block pointed to by the invalid dn_spill. 4749 */ 4750 db->db_blkptr = NULL; 4751 } 4752 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 4753 mutex_exit(&dn->dn_mtx); 4754 } 4755 4756 /* 4757 * If this is a bonus buffer, simply copy the bonus data into the 4758 * dnode. It will be written out when the dnode is synced (and it 4759 * will be synced, since it must have been dirty for dbuf_sync to 4760 * be called). 4761 */ 4762 if (db->db_blkid == DMU_BONUS_BLKID) { 4763 ASSERT(dr->dr_dbuf == db); 4764 dbuf_sync_bonus(dr, tx); 4765 return; 4766 } 4767 4768 os = dn->dn_objset; 4769 4770 /* 4771 * This function may have dropped the db_mtx lock allowing a dmu_sync 4772 * operation to sneak in. As a result, we need to ensure that we 4773 * don't check the dr_override_state until we have returned from 4774 * dbuf_check_blkptr. 4775 */ 4776 dbuf_check_blkptr(dn, db); 4777 4778 /* 4779 * If this buffer is in the middle of an immediate write, wait for the 4780 * synchronous IO to complete. 4781 * 4782 * This is also valid even with Direct I/O writes setting a dirty 4783 * records override state into DR_IN_DMU_SYNC, because all 4784 * Direct I/O writes happen in open-context. 4785 */ 4786 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 4787 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 4788 cv_wait(&db->db_changed, &db->db_mtx); 4789 } 4790 4791 /* 4792 * If this is a dnode block, ensure it is appropriately encrypted 4793 * or decrypted, depending on what we are writing to it this txg. 4794 */ 4795 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT) 4796 dbuf_prepare_encrypted_dnode_leaf(dr); 4797 4798 if (*datap != NULL && *datap == db->db_buf && 4799 dn->dn_object != DMU_META_DNODE_OBJECT && 4800 zfs_refcount_count(&db->db_holds) > 1) { 4801 /* 4802 * If this buffer is currently "in use" (i.e., there 4803 * are active holds and db_data still references it), 4804 * then make a copy before we start the write so that 4805 * any modifications from the open txg will not leak 4806 * into this write. 4807 * 4808 * NOTE: this copy does not need to be made for 4809 * objects only modified in the syncing context (e.g. 4810 * DNONE_DNODE blocks). 
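 * As in dbuf_hold_copy(), the replacement buffer is allocated in the same
 * form as the original (raw/encrypted, compressed, or plain) before the
 * data is copied.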
4811 */ 4812 int psize = arc_buf_size(*datap); 4813 int lsize = arc_buf_lsize(*datap); 4814 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 4815 enum zio_compress compress_type = arc_get_compression(*datap); 4816 uint8_t complevel = arc_get_complevel(*datap); 4817 4818 if (arc_is_encrypted(*datap)) { 4819 boolean_t byteorder; 4820 uint8_t salt[ZIO_DATA_SALT_LEN]; 4821 uint8_t iv[ZIO_DATA_IV_LEN]; 4822 uint8_t mac[ZIO_DATA_MAC_LEN]; 4823 4824 arc_get_raw_params(*datap, &byteorder, salt, iv, mac); 4825 *datap = arc_alloc_raw_buf(os->os_spa, db, 4826 dmu_objset_id(os), byteorder, salt, iv, mac, 4827 dn->dn_type, psize, lsize, compress_type, 4828 complevel); 4829 } else if (compress_type != ZIO_COMPRESS_OFF) { 4830 ASSERT3U(type, ==, ARC_BUFC_DATA); 4831 *datap = arc_alloc_compressed_buf(os->os_spa, db, 4832 psize, lsize, compress_type, complevel); 4833 } else { 4834 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 4835 } 4836 memcpy((*datap)->b_data, db->db.db_data, psize); 4837 } 4838 db->db_data_pending = dr; 4839 4840 mutex_exit(&db->db_mtx); 4841 4842 dbuf_write(dr, *datap, tx); 4843 4844 ASSERT(!list_link_active(&dr->dr_dirty_node)); 4845 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 4846 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr); 4847 } else { 4848 zio_nowait(dr->dr_zio); 4849 } 4850 } 4851 4852 /* 4853 * Syncs out a range of dirty records for indirect or leaf dbufs. May be 4854 * called recursively from dbuf_sync_indirect(). 4855 */ 4856 void 4857 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 4858 { 4859 dbuf_dirty_record_t *dr; 4860 4861 while ((dr = list_head(list))) { 4862 if (dr->dr_zio != NULL) { 4863 /* 4864 * If we find an already initialized zio then we 4865 * are processing the meta-dnode, and we have finished. 4866 * The dbufs for all dnodes are put back on the list 4867 * during processing, so that we can zio_wait() 4868 * these IOs after initiating all child IOs. 
4869 */ 4870 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 4871 DMU_META_DNODE_OBJECT); 4872 break; 4873 } 4874 list_remove(list, dr); 4875 if (dr->dr_dbuf == NULL) { 4876 dbuf_sync_lightweight(dr, tx); 4877 } else { 4878 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 4879 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 4880 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 4881 } 4882 if (dr->dr_dbuf->db_level > 0) 4883 dbuf_sync_indirect(dr, tx); 4884 else 4885 dbuf_sync_leaf(dr, tx); 4886 } 4887 } 4888 } 4889 4890 static void 4891 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 4892 { 4893 (void) buf; 4894 dmu_buf_impl_t *db = vdb; 4895 dnode_t *dn; 4896 blkptr_t *bp = zio->io_bp; 4897 blkptr_t *bp_orig = &zio->io_bp_orig; 4898 spa_t *spa = zio->io_spa; 4899 int64_t delta; 4900 uint64_t fill = 0; 4901 int i; 4902 4903 ASSERT3P(db->db_blkptr, !=, NULL); 4904 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 4905 4906 DB_DNODE_ENTER(db); 4907 dn = DB_DNODE(db); 4908 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 4909 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 4910 zio->io_prev_space_delta = delta; 4911 4912 if (BP_GET_LOGICAL_BIRTH(bp) != 0) { 4913 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 4914 BP_GET_TYPE(bp) == dn->dn_type) || 4915 (db->db_blkid == DMU_SPILL_BLKID && 4916 BP_GET_TYPE(bp) == dn->dn_bonustype) || 4917 BP_IS_EMBEDDED(bp)); 4918 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 4919 } 4920 4921 mutex_enter(&db->db_mtx); 4922 4923 #ifdef ZFS_DEBUG 4924 if (db->db_blkid == DMU_SPILL_BLKID) { 4925 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 4926 ASSERT(!(BP_IS_HOLE(bp)) && 4927 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 4928 } 4929 #endif 4930 4931 if (db->db_level == 0) { 4932 mutex_enter(&dn->dn_mtx); 4933 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 4934 db->db_blkid != DMU_SPILL_BLKID) { 4935 ASSERT0(db->db_objset->os_raw_receive); 4936 dn->dn_phys->dn_maxblkid = db->db_blkid; 4937 } 4938 mutex_exit(&dn->dn_mtx); 4939 4940 if (dn->dn_type == DMU_OT_DNODE) { 4941 i = 0; 4942 while (i < db->db.db_size) { 4943 dnode_phys_t *dnp = 4944 (void *)(((char *)db->db.db_data) + i); 4945 4946 i += DNODE_MIN_SIZE; 4947 if (dnp->dn_type != DMU_OT_NONE) { 4948 fill++; 4949 for (int j = 0; j < dnp->dn_nblkptr; 4950 j++) { 4951 (void) zfs_blkptr_verify(spa, 4952 &dnp->dn_blkptr[j], 4953 BLK_CONFIG_SKIP, 4954 BLK_VERIFY_HALT); 4955 } 4956 if (dnp->dn_flags & 4957 DNODE_FLAG_SPILL_BLKPTR) { 4958 (void) zfs_blkptr_verify(spa, 4959 DN_SPILL_BLKPTR(dnp), 4960 BLK_CONFIG_SKIP, 4961 BLK_VERIFY_HALT); 4962 } 4963 i += dnp->dn_extra_slots * 4964 DNODE_MIN_SIZE; 4965 } 4966 } 4967 } else { 4968 if (BP_IS_HOLE(bp)) { 4969 fill = 0; 4970 } else { 4971 fill = 1; 4972 } 4973 } 4974 } else { 4975 blkptr_t *ibp = db->db.db_data; 4976 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 4977 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 4978 if (BP_IS_HOLE(ibp)) 4979 continue; 4980 (void) zfs_blkptr_verify(spa, ibp, 4981 BLK_CONFIG_SKIP, BLK_VERIFY_HALT); 4982 fill += BP_GET_FILL(ibp); 4983 } 4984 } 4985 DB_DNODE_EXIT(db); 4986 4987 if (!BP_IS_EMBEDDED(bp)) 4988 BP_SET_FILL(bp, fill); 4989 4990 mutex_exit(&db->db_mtx); 4991 4992 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG); 4993 *db->db_blkptr = *bp; 4994 dmu_buf_unlock_parent(db, dblt, FTAG); 4995 } 4996 4997 /* 4998 * This function gets called just prior to running through the compression 4999 * stage of the zio pipeline. 
If we're an indirect block comprised of only 5000 * holes, then we want this indirect to be compressed away to a hole. In 5001 * order to do that we must zero out any information about the holes that 5002 * this indirect points to prior to before we try to compress it. 5003 */ 5004 static void 5005 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 5006 { 5007 (void) zio, (void) buf; 5008 dmu_buf_impl_t *db = vdb; 5009 blkptr_t *bp; 5010 unsigned int epbs, i; 5011 5012 ASSERT3U(db->db_level, >, 0); 5013 DB_DNODE_ENTER(db); 5014 epbs = DB_DNODE(db)->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 5015 DB_DNODE_EXIT(db); 5016 ASSERT3U(epbs, <, 31); 5017 5018 /* Determine if all our children are holes */ 5019 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) { 5020 if (!BP_IS_HOLE(bp)) 5021 break; 5022 } 5023 5024 /* 5025 * If all the children are holes, then zero them all out so that 5026 * we may get compressed away. 5027 */ 5028 if (i == 1ULL << epbs) { 5029 /* 5030 * We only found holes. Grab the rwlock to prevent 5031 * anybody from reading the blocks we're about to 5032 * zero out. 5033 */ 5034 rw_enter(&db->db_rwlock, RW_WRITER); 5035 memset(db->db.db_data, 0, db->db.db_size); 5036 rw_exit(&db->db_rwlock); 5037 } 5038 } 5039 5040 static void 5041 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 5042 { 5043 (void) buf; 5044 dmu_buf_impl_t *db = vdb; 5045 blkptr_t *bp_orig = &zio->io_bp_orig; 5046 blkptr_t *bp = db->db_blkptr; 5047 objset_t *os = db->db_objset; 5048 dmu_tx_t *tx = os->os_synctx; 5049 5050 ASSERT0(zio->io_error); 5051 ASSERT(db->db_blkptr == bp); 5052 5053 /* 5054 * For nopwrites and rewrites we ensure that the bp matches our 5055 * original and bypass all the accounting. 5056 */ 5057 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 5058 ASSERT(BP_EQUAL(bp, bp_orig)); 5059 } else { 5060 dsl_dataset_t *ds = os->os_dsl_dataset; 5061 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 5062 dsl_dataset_block_born(ds, bp, tx); 5063 } 5064 5065 mutex_enter(&db->db_mtx); 5066 5067 DBUF_VERIFY(db); 5068 5069 dbuf_dirty_record_t *dr = db->db_data_pending; 5070 dnode_t *dn = dr->dr_dnode; 5071 ASSERT(!list_link_active(&dr->dr_dirty_node)); 5072 ASSERT(dr->dr_dbuf == db); 5073 ASSERT(list_next(&db->db_dirty_records, dr) == NULL); 5074 list_remove(&db->db_dirty_records, dr); 5075 5076 #ifdef ZFS_DEBUG 5077 if (db->db_blkid == DMU_SPILL_BLKID) { 5078 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 5079 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 5080 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 5081 } 5082 #endif 5083 5084 if (db->db_level == 0) { 5085 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 5086 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 5087 5088 /* no dr_data if this is a NO_FILL or Direct I/O */ 5089 if (dr->dt.dl.dr_data != NULL && 5090 dr->dt.dl.dr_data != db->db_buf) { 5091 ASSERT3B(dr->dt.dl.dr_brtwrite, ==, B_FALSE); 5092 ASSERT3B(dr->dt.dl.dr_diowrite, ==, B_FALSE); 5093 arc_buf_destroy(dr->dt.dl.dr_data, db); 5094 } 5095 } else { 5096 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 5097 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 5098 if (!BP_IS_HOLE(db->db_blkptr)) { 5099 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift - 5100 SPA_BLKPTRSHIFT; 5101 ASSERT3U(db->db_blkid, <=, 5102 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 5103 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 5104 db->db.db_size); 5105 } 5106 mutex_destroy(&dr->dt.di.dr_mtx); 5107 
list_destroy(&dr->dt.di.dr_children); 5108 } 5109 5110 cv_broadcast(&db->db_changed); 5111 ASSERT(db->db_dirtycnt > 0); 5112 db->db_dirtycnt -= 1; 5113 db->db_data_pending = NULL; 5114 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 5115 5116 dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted, 5117 zio->io_txg); 5118 5119 kmem_cache_free(dbuf_dirty_kmem_cache, dr); 5120 } 5121 5122 static void 5123 dbuf_write_nofill_ready(zio_t *zio) 5124 { 5125 dbuf_write_ready(zio, NULL, zio->io_private); 5126 } 5127 5128 static void 5129 dbuf_write_nofill_done(zio_t *zio) 5130 { 5131 dbuf_write_done(zio, NULL, zio->io_private); 5132 } 5133 5134 static void 5135 dbuf_write_override_ready(zio_t *zio) 5136 { 5137 dbuf_dirty_record_t *dr = zio->io_private; 5138 dmu_buf_impl_t *db = dr->dr_dbuf; 5139 5140 dbuf_write_ready(zio, NULL, db); 5141 } 5142 5143 static void 5144 dbuf_write_override_done(zio_t *zio) 5145 { 5146 dbuf_dirty_record_t *dr = zio->io_private; 5147 dmu_buf_impl_t *db = dr->dr_dbuf; 5148 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 5149 5150 mutex_enter(&db->db_mtx); 5151 if (!BP_EQUAL(zio->io_bp, obp)) { 5152 if (!BP_IS_HOLE(obp)) 5153 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 5154 arc_release(dr->dt.dl.dr_data, db); 5155 } 5156 mutex_exit(&db->db_mtx); 5157 5158 dbuf_write_done(zio, NULL, db); 5159 5160 if (zio->io_abd != NULL) 5161 abd_free(zio->io_abd); 5162 } 5163 5164 typedef struct dbuf_remap_impl_callback_arg { 5165 objset_t *drica_os; 5166 uint64_t drica_blk_birth; 5167 dmu_tx_t *drica_tx; 5168 } dbuf_remap_impl_callback_arg_t; 5169 5170 static void 5171 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 5172 void *arg) 5173 { 5174 dbuf_remap_impl_callback_arg_t *drica = arg; 5175 objset_t *os = drica->drica_os; 5176 spa_t *spa = dmu_objset_spa(os); 5177 dmu_tx_t *tx = drica->drica_tx; 5178 5179 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 5180 5181 if (os == spa_meta_objset(spa)) { 5182 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 5183 } else { 5184 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 5185 size, drica->drica_blk_birth, tx); 5186 } 5187 } 5188 5189 static void 5190 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx) 5191 { 5192 blkptr_t bp_copy = *bp; 5193 spa_t *spa = dmu_objset_spa(dn->dn_objset); 5194 dbuf_remap_impl_callback_arg_t drica; 5195 5196 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 5197 5198 drica.drica_os = dn->dn_objset; 5199 drica.drica_blk_birth = BP_GET_LOGICAL_BIRTH(bp); 5200 drica.drica_tx = tx; 5201 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 5202 &drica)) { 5203 /* 5204 * If the blkptr being remapped is tracked by a livelist, 5205 * then we need to make sure the livelist reflects the update. 5206 * First, cancel out the old blkptr by appending a 'FREE' 5207 * entry. Next, add an 'ALLOC' to track the new version. This 5208 * way we avoid trying to free an inaccurate blkptr at delete. 5209 * Note that embedded blkptrs are not tracked in livelists. 
5210 */ 5211 if (dn->dn_objset != spa_meta_objset(spa)) { 5212 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset); 5213 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) && 5214 BP_GET_LOGICAL_BIRTH(bp) > 5215 ds->ds_dir->dd_origin_txg) { 5216 ASSERT(!BP_IS_EMBEDDED(bp)); 5217 ASSERT(dsl_dir_is_clone(ds->ds_dir)); 5218 ASSERT(spa_feature_is_enabled(spa, 5219 SPA_FEATURE_LIVELIST)); 5220 bplist_append(&ds->ds_dir->dd_pending_frees, 5221 bp); 5222 bplist_append(&ds->ds_dir->dd_pending_allocs, 5223 &bp_copy); 5224 } 5225 } 5226 5227 /* 5228 * The db_rwlock prevents dbuf_read_impl() from 5229 * dereferencing the BP while we are changing it. To 5230 * avoid lock contention, only grab it when we are actually 5231 * changing the BP. 5232 */ 5233 if (rw != NULL) 5234 rw_enter(rw, RW_WRITER); 5235 *bp = bp_copy; 5236 if (rw != NULL) 5237 rw_exit(rw); 5238 } 5239 } 5240 5241 /* 5242 * Remap any existing BP's to concrete vdevs, if possible. 5243 */ 5244 static void 5245 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 5246 { 5247 spa_t *spa = dmu_objset_spa(db->db_objset); 5248 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 5249 5250 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 5251 return; 5252 5253 if (db->db_level > 0) { 5254 blkptr_t *bp = db->db.db_data; 5255 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 5256 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx); 5257 } 5258 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 5259 dnode_phys_t *dnp = db->db.db_data; 5260 ASSERT3U(dn->dn_type, ==, DMU_OT_DNODE); 5261 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; 5262 i += dnp[i].dn_extra_slots + 1) { 5263 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 5264 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL : 5265 &dn->dn_dbuf->db_rwlock); 5266 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock, 5267 tx); 5268 } 5269 } 5270 } 5271 } 5272 5273 5274 /* 5275 * Populate dr->dr_zio with a zio to commit a dirty buffer to disk. 5276 * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio). 5277 */ 5278 static void 5279 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 5280 { 5281 dmu_buf_impl_t *db = dr->dr_dbuf; 5282 dnode_t *dn = dr->dr_dnode; 5283 objset_t *os; 5284 dmu_buf_impl_t *parent = db->db_parent; 5285 uint64_t txg = tx->tx_txg; 5286 zbookmark_phys_t zb; 5287 zio_prop_t zp; 5288 zio_t *pio; /* parent I/O */ 5289 int wp_flag = 0; 5290 5291 ASSERT(dmu_tx_is_syncing(tx)); 5292 5293 os = dn->dn_objset; 5294 5295 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 5296 /* 5297 * Private object buffers are released here rather than in 5298 * dbuf_dirty() since they are only modified in the syncing 5299 * context and we don't want the overhead of making multiple 5300 * copies of the data. 5301 */ 5302 if (BP_IS_HOLE(db->db_blkptr)) 5303 arc_buf_thaw(data); 5304 else 5305 dbuf_release_bp(db); 5306 dbuf_remap(dn, db, tx); 5307 } 5308 5309 if (parent != dn->dn_dbuf) { 5310 /* Our parent is an indirect block. */ 5311 /* We have a dirty parent that has been scheduled for write. */ 5312 ASSERT(parent && parent->db_data_pending); 5313 /* Our parent's buffer is one level closer to the dnode. */ 5314 ASSERT(db->db_level == parent->db_level-1); 5315 /* 5316 * We're about to modify our parent's db_data by modifying 5317 * our block pointer, so the parent must be released. 5318 */ 5319 ASSERT(arc_released(parent->db_buf)); 5320 pio = parent->db_data_pending->dr_zio; 5321 } else { 5322 /* Our parent is the dnode itself. 
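 * The block pointer we are going to update lives directly in the
 * dnode_phys_t (either dn_blkptr[] or the spill BP), so the write is
 * parented to the dnode's own zio.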
*/ 5323 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 5324 db->db_blkid != DMU_SPILL_BLKID) || 5325 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 5326 if (db->db_blkid != DMU_SPILL_BLKID) 5327 ASSERT3P(db->db_blkptr, ==, 5328 &dn->dn_phys->dn_blkptr[db->db_blkid]); 5329 pio = dn->dn_zio; 5330 } 5331 5332 ASSERT(db->db_level == 0 || data == db->db_buf); 5333 ASSERT3U(BP_GET_LOGICAL_BIRTH(db->db_blkptr), <=, txg); 5334 ASSERT(pio); 5335 5336 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 5337 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 5338 db->db.db_object, db->db_level, db->db_blkid); 5339 5340 if (db->db_blkid == DMU_SPILL_BLKID) 5341 wp_flag = WP_SPILL; 5342 wp_flag |= (data == NULL) ? WP_NOFILL : 0; 5343 5344 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 5345 5346 /* 5347 * We copy the blkptr now (rather than when we instantiate the dirty 5348 * record), because its value can change between open context and 5349 * syncing context. We do not need to hold dn_struct_rwlock to read 5350 * db_blkptr because we are in syncing context. 5351 */ 5352 dr->dr_bp_copy = *db->db_blkptr; 5353 5354 if (db->db_level == 0 && 5355 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 5356 /* 5357 * The BP for this block has been provided by open context 5358 * (by dmu_sync(), dmu_write_direct(), 5359 * or dmu_buf_write_embedded()). 5360 */ 5361 abd_t *contents = (data != NULL) ? 5362 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 5363 5364 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy, 5365 contents, db->db.db_size, db->db.db_size, &zp, 5366 dbuf_write_override_ready, NULL, 5367 dbuf_write_override_done, 5368 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 5369 mutex_enter(&db->db_mtx); 5370 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 5371 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 5372 dr->dt.dl.dr_copies, dr->dt.dl.dr_gang_copies, 5373 dr->dt.dl.dr_nopwrite, dr->dt.dl.dr_brtwrite); 5374 mutex_exit(&db->db_mtx); 5375 } else if (data == NULL) { 5376 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 5377 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 5378 dr->dr_zio = zio_write(pio, os->os_spa, txg, 5379 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 5380 dbuf_write_nofill_ready, NULL, 5381 dbuf_write_nofill_done, db, 5382 ZIO_PRIORITY_ASYNC_WRITE, 5383 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 5384 } else { 5385 ASSERT(arc_released(data)); 5386 5387 /* 5388 * For indirect blocks, we want to setup the children 5389 * ready callback so that we can properly handle an indirect 5390 * block that only contains holes. 
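 * (dbuf_write_children_ready() zeroes the block when every child BP is
 * a hole, which lets the whole indirect be compressed away to a hole.)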
5391 */ 5392 arc_write_done_func_t *children_ready_cb = NULL; 5393 if (db->db_level != 0) 5394 children_ready_cb = dbuf_write_children_ready; 5395 5396 dr->dr_zio = arc_write(pio, os->os_spa, txg, 5397 &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db), 5398 dbuf_is_l2cacheable(db, NULL), &zp, dbuf_write_ready, 5399 children_ready_cb, dbuf_write_done, db, 5400 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 5401 } 5402 } 5403 5404 EXPORT_SYMBOL(dbuf_find); 5405 EXPORT_SYMBOL(dbuf_is_metadata); 5406 EXPORT_SYMBOL(dbuf_destroy); 5407 EXPORT_SYMBOL(dbuf_whichblock); 5408 EXPORT_SYMBOL(dbuf_read); 5409 EXPORT_SYMBOL(dbuf_unoverride); 5410 EXPORT_SYMBOL(dbuf_free_range); 5411 EXPORT_SYMBOL(dbuf_new_size); 5412 EXPORT_SYMBOL(dbuf_release_bp); 5413 EXPORT_SYMBOL(dbuf_dirty); 5414 EXPORT_SYMBOL(dmu_buf_set_crypt_params); 5415 EXPORT_SYMBOL(dmu_buf_will_dirty); 5416 EXPORT_SYMBOL(dmu_buf_is_dirty); 5417 EXPORT_SYMBOL(dmu_buf_will_clone_or_dio); 5418 EXPORT_SYMBOL(dmu_buf_will_not_fill); 5419 EXPORT_SYMBOL(dmu_buf_will_fill); 5420 EXPORT_SYMBOL(dmu_buf_fill_done); 5421 EXPORT_SYMBOL(dmu_buf_rele); 5422 EXPORT_SYMBOL(dbuf_assign_arcbuf); 5423 EXPORT_SYMBOL(dbuf_prefetch); 5424 EXPORT_SYMBOL(dbuf_hold_impl); 5425 EXPORT_SYMBOL(dbuf_hold); 5426 EXPORT_SYMBOL(dbuf_hold_level); 5427 EXPORT_SYMBOL(dbuf_create_bonus); 5428 EXPORT_SYMBOL(dbuf_spill_set_blksz); 5429 EXPORT_SYMBOL(dbuf_rm_spill); 5430 EXPORT_SYMBOL(dbuf_add_ref); 5431 EXPORT_SYMBOL(dbuf_rele); 5432 EXPORT_SYMBOL(dbuf_rele_and_unlock); 5433 EXPORT_SYMBOL(dbuf_refcount); 5434 EXPORT_SYMBOL(dbuf_sync_list); 5435 EXPORT_SYMBOL(dmu_buf_set_user); 5436 EXPORT_SYMBOL(dmu_buf_set_user_ie); 5437 EXPORT_SYMBOL(dmu_buf_get_user); 5438 EXPORT_SYMBOL(dmu_buf_get_blkptr); 5439 5440 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW, 5441 "Maximum size in bytes of the dbuf cache."); 5442 5443 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW, 5444 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction."); 5445 5446 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW, 5447 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops."); 5448 5449 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW, 5450 "Maximum size in bytes of dbuf metadata cache."); 5451 5452 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW, 5453 "Set size of dbuf cache to log2 fraction of arc size."); 5454 5455 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW, 5456 "Set size of dbuf metadata cache to log2 fraction of arc size."); 5457 5458 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD, 5459 "Set size of dbuf cache mutex array as log2 shift."); 5460
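/*
 * Illustrative sketch of the basic consumer pattern for the interfaces
 * exported above. This is not part of the build; error handling and tx
 * setup are elided, and the caller is assumed to already hold the dnode
 * and dn_struct_rwlock as a reader:
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		if (dbuf_read(db, NULL, DB_RF_CANFAIL) == 0) {
 *			dmu_buf_will_dirty(&db->db, tx);
 *			... modify db->db.db_data for this txg ...
 *		}
 *		dbuf_rele(db, FTAG);
 *	}
 */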