1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2012, 2018 by Delphix. All rights reserved. 25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 */ 30 31 #include <sys/zfs_context.h> 32 #include <sys/dmu.h> 33 #include <sys/dmu_send.h> 34 #include <sys/dmu_impl.h> 35 #include <sys/dbuf.h> 36 #include <sys/dmu_objset.h> 37 #include <sys/dsl_dataset.h> 38 #include <sys/dsl_dir.h> 39 #include <sys/dmu_tx.h> 40 #include <sys/spa.h> 41 #include <sys/zio.h> 42 #include <sys/dmu_zfetch.h> 43 #include <sys/sa.h> 44 #include <sys/sa_impl.h> 45 #include <sys/zfeature.h> 46 #include <sys/blkptr.h> 47 #include <sys/range_tree.h> 48 #include <sys/callb.h> 49 #include <sys/abd.h> 50 #include <sys/vdev.h> 51 #include <sys/cityhash.h> 52 #include <sys/spa_impl.h> 53 54 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 55 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 56 57 #ifndef __lint 58 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu, 59 dmu_buf_evict_func_t *evict_func_sync, 60 dmu_buf_evict_func_t *evict_func_async, 61 dmu_buf_t **clear_on_evict_dbufp); 62 #endif /* ! __lint */ 63 64 /* 65 * Global data structures and functions for the dbuf cache. 66 */ 67 static kmem_cache_t *dbuf_kmem_cache; 68 static taskq_t *dbu_evict_taskq; 69 70 static kthread_t *dbuf_cache_evict_thread; 71 static kmutex_t dbuf_evict_lock; 72 static kcondvar_t dbuf_evict_cv; 73 static boolean_t dbuf_evict_thread_exit; 74 75 /* 76 * There are two dbuf caches; each dbuf can only be in one of them at a time. 77 * 78 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands 79 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs 80 * that represent the metadata that describes filesystems/snapshots/ 81 * bookmarks/properties/etc. We only evict from this cache when we export a 82 * pool, to short-circuit as much I/O as possible for all administrative 83 * commands that need the metadata. There is no eviction policy for this 84 * cache, because we try to only include types in it which would occupy a 85 * very small amount of space per object but create a large impact on the 86 * performance of these commands. 
Instead, after it reaches a maximum size 87 * (which should only happen on very small memory systems with a very large 88 * number of filesystem objects), we stop taking new dbufs into the 89 * metadata cache, instead putting them in the normal dbuf cache. 90 * 91 * 2. LRU cache of dbufs. The "dbuf cache" maintains a list of dbufs that 92 * are not currently held but have been recently released. These dbufs 93 * are not eligible for arc eviction until they are aged out of the cache. 94 * Dbufs that are aged out of the cache will be immediately destroyed and 95 * become eligible for arc eviction. 96 * 97 * Dbufs are added to these caches once the last hold is released. If a dbuf is 98 * later accessed and still exists in the dbuf cache, then it will be removed 99 * from the cache and later re-added to the head of the cache. 100 * 101 * If a given dbuf meets the requirements for the metadata cache, it will go 102 * there, otherwise it will be considered for the generic LRU dbuf cache. The 103 * caches and the refcounts tracking their sizes are stored in an array indexed 104 * by those caches' matching enum values (from dbuf_cached_state_t). 105 */ 106 typedef struct dbuf_cache { 107 multilist_t *cache; 108 zfs_refcount_t size; 109 } dbuf_cache_t; 110 dbuf_cache_t dbuf_caches[DB_CACHE_MAX]; 111 112 /* Size limits for the caches */ 113 uint64_t dbuf_cache_max_bytes = 0; 114 uint64_t dbuf_metadata_cache_max_bytes = 0; 115 /* Set the default sizes of the caches to log2 fraction of arc size */ 116 int dbuf_cache_shift = 5; 117 int dbuf_metadata_cache_shift = 6; 118 119 /* 120 * For diagnostic purposes, this is incremented whenever we can't add 121 * something to the metadata cache because it's full, and instead put 122 * the data in the regular dbuf cache. 123 */ 124 uint64_t dbuf_metadata_cache_overflow; 125 126 /* 127 * The LRU dbuf cache uses a three-stage eviction policy: 128 * - A low water marker designates when the dbuf eviction thread 129 * should stop evicting from the dbuf cache. 130 * - When we reach the maximum size (aka mid water mark), we 131 * signal the eviction thread to run. 132 * - The high water mark indicates when the eviction thread 133 * is unable to keep up with the incoming load and eviction must 134 * happen in the context of the calling thread. 135 * 136 * The dbuf cache: 137 * (max size) 138 * low water mid water hi water 139 * +----------------------------------------+----------+----------+ 140 * | | | | 141 * | | | | 142 * | | | | 143 * | | | | 144 * +----------------------------------------+----------+----------+ 145 * stop signal evict 146 * evicting eviction directly 147 * thread 148 * 149 * The high and low water marks indicate the operating range for the eviction 150 * thread. The low water mark is, by default, 90% of the total size of the 151 * cache and the high water mark is at 110% (both of these percentages can be 152 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct, 153 * respectively). The eviction thread will try to ensure that the cache remains 154 * within this range by waking up every second and checking if the cache is 155 * above the low water mark. The thread can also be woken up by callers adding 156 * elements into the cache if the cache is larger than the mid water (i.e max 157 * cache size). Once the eviction thread is woken up and eviction is required, 158 * it will continue evicting buffers until it's able to reduce the cache size 159 * to the low water mark. 
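 *
 * As a purely illustrative example (the 4 GB ARC figure is hypothetical,
 * the rest are the defaults in this file): with dbuf_cache_shift = 5,
 * dbuf_cache_max_bytes would default to 4 GB >> 5 = 128 MB in dbuf_init();
 * with dbuf_cache_hiwater_pct and dbuf_cache_lowater_pct both at 10, the
 * eviction thread would stop once the cache drops below roughly
 * 128 MB - 12.8 MB ~= 115 MB, and callers would begin evicting directly
 * once the cache grows beyond roughly 128 MB + 12.8 MB ~= 141 MB.
 *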
 * If the cache size continues to grow and hits the high water mark, then
 * callers adding elements to the cache will begin to evict directly from the
 * cache until the cache is no longer above the high water mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
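 * (As with dbuf_find() above, an entry found in the DB_EVICTING state is
 * not returned; the scan simply moves past it.)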
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_max_bytes) {
			dbuf_metadata_cache_overflow++;
			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
			    dmu_buf_impl_t *, db);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
483 * 484 * Also, the low order bits of the hash value are thought to be 485 * distributed evenly. Otherwise, in the case that the multilist 486 * has a power of two number of sublists, each sublists' usage 487 * would not be evenly distributed. 488 */ 489 return (dbuf_hash(db->db_objset, db->db.db_object, 490 db->db_level, db->db_blkid) % 491 multilist_get_num_sublists(ml)); 492 } 493 494 static inline boolean_t 495 dbuf_cache_above_hiwater(void) 496 { 497 uint64_t dbuf_cache_hiwater_bytes = 498 (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100; 499 500 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) > 501 dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes); 502 } 503 504 static inline boolean_t 505 dbuf_cache_above_lowater(void) 506 { 507 uint64_t dbuf_cache_lowater_bytes = 508 (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100; 509 510 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) > 511 dbuf_cache_max_bytes - dbuf_cache_lowater_bytes); 512 } 513 514 /* 515 * Evict the oldest eligible dbuf from the dbuf cache. 516 */ 517 static void 518 dbuf_evict_one(void) 519 { 520 int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache); 521 multilist_sublist_t *mls = multilist_sublist_lock( 522 dbuf_caches[DB_DBUF_CACHE].cache, idx); 523 524 ASSERT(!MUTEX_HELD(&dbuf_evict_lock)); 525 526 dmu_buf_impl_t *db = multilist_sublist_tail(mls); 527 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) { 528 db = multilist_sublist_prev(mls, db); 529 } 530 531 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db, 532 multilist_sublist_t *, mls); 533 534 if (db != NULL) { 535 multilist_sublist_remove(mls, db); 536 multilist_sublist_unlock(mls); 537 (void) zfs_refcount_remove_many( 538 &dbuf_caches[DB_DBUF_CACHE].size, 539 db->db.db_size, db); 540 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE); 541 db->db_caching_status = DB_NO_CACHE; 542 dbuf_destroy(db); 543 } else { 544 multilist_sublist_unlock(mls); 545 } 546 } 547 548 /* 549 * The dbuf evict thread is responsible for aging out dbufs from the 550 * cache. Once the cache has reached it's maximum size, dbufs are removed 551 * and destroyed. The eviction thread will continue running until the size 552 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged 553 * out of the cache it is destroyed and becomes eligible for arc eviction. 554 */ 555 /* ARGSUSED */ 556 static void 557 dbuf_evict_thread(void *unused) 558 { 559 callb_cpr_t cpr; 560 561 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG); 562 563 mutex_enter(&dbuf_evict_lock); 564 while (!dbuf_evict_thread_exit) { 565 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 566 CALLB_CPR_SAFE_BEGIN(&cpr); 567 (void) cv_timedwait_hires(&dbuf_evict_cv, 568 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0); 569 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock); 570 } 571 mutex_exit(&dbuf_evict_lock); 572 573 /* 574 * Keep evicting as long as we're above the low water mark 575 * for the cache. We do this without holding the locks to 576 * minimize lock contention. 577 */ 578 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 579 dbuf_evict_one(); 580 } 581 582 mutex_enter(&dbuf_evict_lock); 583 } 584 585 dbuf_evict_thread_exit = B_FALSE; 586 cv_broadcast(&dbuf_evict_cv); 587 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */ 588 thread_exit(); 589 } 590 591 /* 592 * Wake up the dbuf eviction thread if the dbuf cache is at its max size. 
593 * If the dbuf cache is at its high water mark, then evict a dbuf from the 594 * dbuf cache using the callers context. 595 */ 596 static void 597 dbuf_evict_notify(void) 598 { 599 /* 600 * We check if we should evict without holding the dbuf_evict_lock, 601 * because it's OK to occasionally make the wrong decision here, 602 * and grabbing the lock results in massive lock contention. 603 */ 604 if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) > 605 dbuf_cache_max_bytes) { 606 if (dbuf_cache_above_hiwater()) 607 dbuf_evict_one(); 608 cv_signal(&dbuf_evict_cv); 609 } 610 } 611 612 void 613 dbuf_init(void) 614 { 615 uint64_t hsize = 1ULL << 16; 616 dbuf_hash_table_t *h = &dbuf_hash_table; 617 int i; 618 619 /* 620 * The hash table is big enough to fill all of physical memory 621 * with an average 4K block size. The table will take up 622 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 623 */ 624 while (hsize * 4096 < physmem * PAGESIZE) 625 hsize <<= 1; 626 627 retry: 628 h->hash_table_mask = hsize - 1; 629 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 630 if (h->hash_table == NULL) { 631 /* XXX - we should really return an error instead of assert */ 632 ASSERT(hsize > (1ULL << 10)); 633 hsize >>= 1; 634 goto retry; 635 } 636 637 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t", 638 sizeof (dmu_buf_impl_t), 639 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 640 641 for (i = 0; i < DBUF_MUTEXES; i++) 642 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 643 644 /* 645 * Setup the parameters for the dbuf caches. We set the sizes of the 646 * dbuf cache and the metadata cache to 1/32nd and 1/16th (default) 647 * of the size of the ARC, respectively. If the values are set in 648 * /etc/system and they're not greater than the size of the ARC, then 649 * we honor that value. 650 */ 651 if (dbuf_cache_max_bytes == 0 || 652 dbuf_cache_max_bytes >= arc_max_bytes()) { 653 dbuf_cache_max_bytes = arc_max_bytes() >> dbuf_cache_shift; 654 } 655 if (dbuf_metadata_cache_max_bytes == 0 || 656 dbuf_metadata_cache_max_bytes >= arc_max_bytes()) { 657 dbuf_metadata_cache_max_bytes = 658 arc_max_bytes() >> dbuf_metadata_cache_shift; 659 } 660 661 /* 662 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc 663 * configuration is not required. 
664 */ 665 dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0); 666 667 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) { 668 dbuf_caches[dcs].cache = 669 multilist_create(sizeof (dmu_buf_impl_t), 670 offsetof(dmu_buf_impl_t, db_cache_link), 671 dbuf_cache_multilist_index_func); 672 zfs_refcount_create(&dbuf_caches[dcs].size); 673 } 674 675 dbuf_evict_thread_exit = B_FALSE; 676 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL); 677 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL); 678 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread, 679 NULL, 0, &p0, TS_RUN, minclsyspri); 680 } 681 682 void 683 dbuf_fini(void) 684 { 685 dbuf_hash_table_t *h = &dbuf_hash_table; 686 int i; 687 688 for (i = 0; i < DBUF_MUTEXES; i++) 689 mutex_destroy(&h->hash_mutexes[i]); 690 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 691 kmem_cache_destroy(dbuf_kmem_cache); 692 taskq_destroy(dbu_evict_taskq); 693 694 mutex_enter(&dbuf_evict_lock); 695 dbuf_evict_thread_exit = B_TRUE; 696 while (dbuf_evict_thread_exit) { 697 cv_signal(&dbuf_evict_cv); 698 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 699 } 700 mutex_exit(&dbuf_evict_lock); 701 702 mutex_destroy(&dbuf_evict_lock); 703 cv_destroy(&dbuf_evict_cv); 704 705 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) { 706 zfs_refcount_destroy(&dbuf_caches[dcs].size); 707 multilist_destroy(dbuf_caches[dcs].cache); 708 } 709 } 710 711 /* 712 * Other stuff. 713 */ 714 715 #ifdef ZFS_DEBUG 716 static void 717 dbuf_verify(dmu_buf_impl_t *db) 718 { 719 dnode_t *dn; 720 dbuf_dirty_record_t *dr; 721 722 ASSERT(MUTEX_HELD(&db->db_mtx)); 723 724 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 725 return; 726 727 ASSERT(db->db_objset != NULL); 728 DB_DNODE_ENTER(db); 729 dn = DB_DNODE(db); 730 if (dn == NULL) { 731 ASSERT(db->db_parent == NULL); 732 ASSERT(db->db_blkptr == NULL); 733 } else { 734 ASSERT3U(db->db.db_object, ==, dn->dn_object); 735 ASSERT3P(db->db_objset, ==, dn->dn_objset); 736 ASSERT3U(db->db_level, <, dn->dn_nlevels); 737 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 738 db->db_blkid == DMU_SPILL_BLKID || 739 !avl_is_empty(&dn->dn_dbufs)); 740 } 741 if (db->db_blkid == DMU_BONUS_BLKID) { 742 ASSERT(dn != NULL); 743 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 744 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 745 } else if (db->db_blkid == DMU_SPILL_BLKID) { 746 ASSERT(dn != NULL); 747 ASSERT0(db->db.db_offset); 748 } else { 749 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 750 } 751 752 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next) 753 ASSERT(dr->dr_dbuf == db); 754 755 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next) 756 ASSERT(dr->dr_dbuf == db); 757 758 /* 759 * We can't assert that db_size matches dn_datablksz because it 760 * can be momentarily different when another thread is doing 761 * dnode_set_blksz(). 762 */ 763 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 764 dr = db->db_data_pending; 765 /* 766 * It should only be modified in syncing context, so 767 * make sure we only have one copy of the data. 
768 */ 769 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 770 } 771 772 /* verify db->db_blkptr */ 773 if (db->db_blkptr) { 774 if (db->db_parent == dn->dn_dbuf) { 775 /* db is pointed to by the dnode */ 776 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 777 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 778 ASSERT(db->db_parent == NULL); 779 else 780 ASSERT(db->db_parent != NULL); 781 if (db->db_blkid != DMU_SPILL_BLKID) 782 ASSERT3P(db->db_blkptr, ==, 783 &dn->dn_phys->dn_blkptr[db->db_blkid]); 784 } else { 785 /* db is pointed to by an indirect block */ 786 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 787 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 788 ASSERT3U(db->db_parent->db.db_object, ==, 789 db->db.db_object); 790 /* 791 * dnode_grow_indblksz() can make this fail if we don't 792 * have the struct_rwlock. XXX indblksz no longer 793 * grows. safe to do this now? 794 */ 795 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 796 ASSERT3P(db->db_blkptr, ==, 797 ((blkptr_t *)db->db_parent->db.db_data + 798 db->db_blkid % epb)); 799 } 800 } 801 } 802 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 803 (db->db_buf == NULL || db->db_buf->b_data) && 804 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 805 db->db_state != DB_FILL && !dn->dn_free_txg) { 806 /* 807 * If the blkptr isn't set but they have nonzero data, 808 * it had better be dirty, otherwise we'll lose that 809 * data when we evict this buffer. 810 * 811 * There is an exception to this rule for indirect blocks; in 812 * this case, if the indirect block is a hole, we fill in a few 813 * fields on each of the child blocks (importantly, birth time) 814 * to prevent hole birth times from being lost when you 815 * partially fill in a hole. 816 */ 817 if (db->db_dirtycnt == 0) { 818 if (db->db_level == 0) { 819 uint64_t *buf = db->db.db_data; 820 int i; 821 822 for (i = 0; i < db->db.db_size >> 3; i++) { 823 ASSERT(buf[i] == 0); 824 } 825 } else { 826 blkptr_t *bps = db->db.db_data; 827 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 828 db->db.db_size); 829 /* 830 * We want to verify that all the blkptrs in the 831 * indirect block are holes, but we may have 832 * automatically set up a few fields for them. 833 * We iterate through each blkptr and verify 834 * they only have those fields set. 835 */ 836 for (int i = 0; 837 i < db->db.db_size / sizeof (blkptr_t); 838 i++) { 839 blkptr_t *bp = &bps[i]; 840 ASSERT(ZIO_CHECKSUM_IS_ZERO( 841 &bp->blk_cksum)); 842 ASSERT( 843 DVA_IS_EMPTY(&bp->blk_dva[0]) && 844 DVA_IS_EMPTY(&bp->blk_dva[1]) && 845 DVA_IS_EMPTY(&bp->blk_dva[2])); 846 ASSERT0(bp->blk_fill); 847 ASSERT0(bp->blk_pad[0]); 848 ASSERT0(bp->blk_pad[1]); 849 ASSERT(!BP_IS_EMBEDDED(bp)); 850 ASSERT(BP_IS_HOLE(bp)); 851 ASSERT0(bp->blk_phys_birth); 852 } 853 } 854 } 855 } 856 DB_DNODE_EXIT(db); 857 } 858 #endif 859 860 static void 861 dbuf_clear_data(dmu_buf_impl_t *db) 862 { 863 ASSERT(MUTEX_HELD(&db->db_mtx)); 864 dbuf_evict_user(db); 865 ASSERT3P(db->db_buf, ==, NULL); 866 db->db.db_data = NULL; 867 if (db->db_state != DB_NOFILL) 868 db->db_state = DB_UNCACHED; 869 } 870 871 static void 872 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 873 { 874 ASSERT(MUTEX_HELD(&db->db_mtx)); 875 ASSERT(buf != NULL); 876 877 db->db_buf = buf; 878 ASSERT(buf->b_data != NULL); 879 db->db.db_data = buf->b_data; 880 } 881 882 /* 883 * Loan out an arc_buf for read. Return the loaned arc_buf. 
884 */ 885 arc_buf_t * 886 dbuf_loan_arcbuf(dmu_buf_impl_t *db) 887 { 888 arc_buf_t *abuf; 889 890 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 891 mutex_enter(&db->db_mtx); 892 if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) { 893 int blksz = db->db.db_size; 894 spa_t *spa = db->db_objset->os_spa; 895 896 mutex_exit(&db->db_mtx); 897 abuf = arc_loan_buf(spa, B_FALSE, blksz); 898 bcopy(db->db.db_data, abuf->b_data, blksz); 899 } else { 900 abuf = db->db_buf; 901 arc_loan_inuse_buf(abuf, db); 902 db->db_buf = NULL; 903 dbuf_clear_data(db); 904 mutex_exit(&db->db_mtx); 905 } 906 return (abuf); 907 } 908 909 /* 910 * Calculate which level n block references the data at the level 0 offset 911 * provided. 912 */ 913 uint64_t 914 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset) 915 { 916 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 917 /* 918 * The level n blkid is equal to the level 0 blkid divided by 919 * the number of level 0s in a level n block. 920 * 921 * The level 0 blkid is offset >> datablkshift = 922 * offset / 2^datablkshift. 923 * 924 * The number of level 0s in a level n is the number of block 925 * pointers in an indirect block, raised to the power of level. 926 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 927 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 928 * 929 * Thus, the level n blkid is: offset / 930 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT))) 931 * = offset / 2^(datablkshift + level * 932 * (indblkshift - SPA_BLKPTRSHIFT)) 933 * = offset >> (datablkshift + level * 934 * (indblkshift - SPA_BLKPTRSHIFT)) 935 */ 936 return (offset >> (dn->dn_datablkshift + level * 937 (dn->dn_indblkshift - SPA_BLKPTRSHIFT))); 938 } else { 939 ASSERT3U(offset, <, dn->dn_datablksz); 940 return (0); 941 } 942 } 943 944 static void 945 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 946 arc_buf_t *buf, void *vdb) 947 { 948 dmu_buf_impl_t *db = vdb; 949 950 mutex_enter(&db->db_mtx); 951 ASSERT3U(db->db_state, ==, DB_READ); 952 /* 953 * All reads are synchronous, so we must have a hold on the dbuf 954 */ 955 ASSERT(zfs_refcount_count(&db->db_holds) > 0); 956 ASSERT(db->db_buf == NULL); 957 ASSERT(db->db.db_data == NULL); 958 if (buf == NULL) { 959 /* i/o error */ 960 ASSERT(zio == NULL || zio->io_error != 0); 961 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 962 ASSERT3P(db->db_buf, ==, NULL); 963 db->db_state = DB_UNCACHED; 964 } else if (db->db_level == 0 && db->db_freed_in_flight) { 965 /* we were freed in flight; disregard any error */ 966 ASSERT(zio == NULL || zio->io_error == 0); 967 if (buf == NULL) { 968 buf = arc_alloc_buf(db->db_objset->os_spa, 969 db, DBUF_GET_BUFC_TYPE(db), db->db.db_size); 970 } 971 arc_release(buf, db); 972 bzero(buf->b_data, db->db.db_size); 973 arc_buf_freeze(buf); 974 db->db_freed_in_flight = FALSE; 975 dbuf_set_data(db, buf); 976 db->db_state = DB_CACHED; 977 } else if (buf != NULL) { 978 /* success */ 979 ASSERT(zio == NULL || zio->io_error == 0); 980 dbuf_set_data(db, buf); 981 db->db_state = DB_CACHED; 982 } 983 cv_broadcast(&db->db_changed); 984 dbuf_rele_and_unlock(db, NULL, B_FALSE); 985 } 986 987 static void 988 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 989 { 990 dnode_t *dn; 991 zbookmark_phys_t zb; 992 arc_flags_t aflags = ARC_FLAG_NOWAIT; 993 994 DB_DNODE_ENTER(db); 995 dn = DB_DNODE(db); 996 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 997 /* We need the struct_rwlock to prevent db_blkptr from changing. 
*/ 998 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 999 ASSERT(MUTEX_HELD(&db->db_mtx)); 1000 ASSERT(db->db_state == DB_UNCACHED); 1001 ASSERT(db->db_buf == NULL); 1002 1003 if (db->db_blkid == DMU_BONUS_BLKID) { 1004 /* 1005 * The bonus length stored in the dnode may be less than 1006 * the maximum available space in the bonus buffer. 1007 */ 1008 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 1009 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 1010 1011 ASSERT3U(bonuslen, <=, db->db.db_size); 1012 db->db.db_data = zio_buf_alloc(max_bonuslen); 1013 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 1014 if (bonuslen < max_bonuslen) 1015 bzero(db->db.db_data, max_bonuslen); 1016 if (bonuslen) 1017 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 1018 DB_DNODE_EXIT(db); 1019 db->db_state = DB_CACHED; 1020 mutex_exit(&db->db_mtx); 1021 return; 1022 } 1023 1024 /* 1025 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 1026 * processes the delete record and clears the bp while we are waiting 1027 * for the dn_mtx (resulting in a "no" from block_freed). 1028 */ 1029 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 1030 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 1031 BP_IS_HOLE(db->db_blkptr)))) { 1032 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1033 1034 dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 1035 db->db.db_size)); 1036 bzero(db->db.db_data, db->db.db_size); 1037 1038 if (db->db_blkptr != NULL && db->db_level > 0 && 1039 BP_IS_HOLE(db->db_blkptr) && 1040 db->db_blkptr->blk_birth != 0) { 1041 blkptr_t *bps = db->db.db_data; 1042 for (int i = 0; i < ((1 << 1043 DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 1044 i++) { 1045 blkptr_t *bp = &bps[i]; 1046 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 1047 1 << dn->dn_indblkshift); 1048 BP_SET_LSIZE(bp, 1049 BP_GET_LEVEL(db->db_blkptr) == 1 ? 1050 dn->dn_datablksz : 1051 BP_GET_LSIZE(db->db_blkptr)); 1052 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 1053 BP_SET_LEVEL(bp, 1054 BP_GET_LEVEL(db->db_blkptr) - 1); 1055 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 1056 } 1057 } 1058 DB_DNODE_EXIT(db); 1059 db->db_state = DB_CACHED; 1060 mutex_exit(&db->db_mtx); 1061 return; 1062 } 1063 1064 DB_DNODE_EXIT(db); 1065 1066 db->db_state = DB_READ; 1067 mutex_exit(&db->db_mtx); 1068 1069 if (DBUF_IS_L2CACHEABLE(db)) 1070 aflags |= ARC_FLAG_L2CACHE; 1071 1072 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ? 1073 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET, 1074 db->db.db_object, db->db_level, db->db_blkid); 1075 1076 dbuf_add_ref(db, NULL); 1077 1078 (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr, 1079 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 1080 (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 1081 &aflags, &zb); 1082 } 1083 1084 /* 1085 * This is our just-in-time copy function. It makes a copy of buffers that 1086 * have been modified in a previous transaction group before we access them in 1087 * the current active group. 1088 * 1089 * This function is used in three places: when we are dirtying a buffer for the 1090 * first time in a txg, when we are freeing a range in a dnode that includes 1091 * this buffer, and when we are accessing a buffer which was received compressed 1092 * and later referenced in a WRITE_BYREF record. 1093 * 1094 * Note that when we are called from dbuf_free_range() we do not put a hold on 1095 * the buffer, we just traverse the active dbuf list for the dnode. 
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dnode_t *dn = DB_DNODE(db);
		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
		dr->dt.dl.dr_data = zio_buf_alloc(bonuslen);
		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (compress_type == ZIO_COMPRESS_OFF) {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		} else {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		/*
		 * If the arc buf is compressed, we need to decompress it to
		 * read the data. This could happen during the "zfs receive" of
		 * a stream which is compressed and deduplicated.
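		 * (Roughly: the dbuf_fix_old_data() call below keeps any
		 * in-flight dirty record consistent before arc_decompress()
		 * expands db_buf and dbuf_set_data() re-points db_data at
		 * the uncompressed data.)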
1179 */ 1180 if (db->db_buf != NULL && 1181 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) { 1182 dbuf_fix_old_data(db, 1183 spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1184 err = arc_decompress(db->db_buf); 1185 dbuf_set_data(db, db->db_buf); 1186 } 1187 mutex_exit(&db->db_mtx); 1188 if (prefetch) 1189 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1190 if ((flags & DB_RF_HAVESTRUCT) == 0) 1191 rw_exit(&dn->dn_struct_rwlock); 1192 DB_DNODE_EXIT(db); 1193 } else if (db->db_state == DB_UNCACHED) { 1194 spa_t *spa = dn->dn_objset->os_spa; 1195 boolean_t need_wait = B_FALSE; 1196 1197 if (zio == NULL && 1198 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1199 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1200 need_wait = B_TRUE; 1201 } 1202 dbuf_read_impl(db, zio, flags); 1203 1204 /* dbuf_read_impl has dropped db_mtx for us */ 1205 1206 if (prefetch) 1207 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1208 1209 if ((flags & DB_RF_HAVESTRUCT) == 0) 1210 rw_exit(&dn->dn_struct_rwlock); 1211 DB_DNODE_EXIT(db); 1212 1213 if (need_wait) 1214 err = zio_wait(zio); 1215 } else { 1216 /* 1217 * Another reader came in while the dbuf was in flight 1218 * between UNCACHED and CACHED. Either a writer will finish 1219 * writing the buffer (sending the dbuf to CACHED) or the 1220 * first reader's request will reach the read_done callback 1221 * and send the dbuf to CACHED. Otherwise, a failure 1222 * occurred and the dbuf went to UNCACHED. 1223 */ 1224 mutex_exit(&db->db_mtx); 1225 if (prefetch) 1226 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1227 if ((flags & DB_RF_HAVESTRUCT) == 0) 1228 rw_exit(&dn->dn_struct_rwlock); 1229 DB_DNODE_EXIT(db); 1230 1231 /* Skip the wait per the caller's request. */ 1232 mutex_enter(&db->db_mtx); 1233 if ((flags & DB_RF_NEVERWAIT) == 0) { 1234 while (db->db_state == DB_READ || 1235 db->db_state == DB_FILL) { 1236 ASSERT(db->db_state == DB_READ || 1237 (flags & DB_RF_HAVESTRUCT) == 0); 1238 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1239 db, zio_t *, zio); 1240 cv_wait(&db->db_changed, &db->db_mtx); 1241 } 1242 if (db->db_state == DB_UNCACHED) 1243 err = SET_ERROR(EIO); 1244 } 1245 mutex_exit(&db->db_mtx); 1246 } 1247 1248 return (err); 1249 } 1250 1251 static void 1252 dbuf_noread(dmu_buf_impl_t *db) 1253 { 1254 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1255 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1256 mutex_enter(&db->db_mtx); 1257 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1258 cv_wait(&db->db_changed, &db->db_mtx); 1259 if (db->db_state == DB_UNCACHED) { 1260 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1261 spa_t *spa = db->db_objset->os_spa; 1262 1263 ASSERT(db->db_buf == NULL); 1264 ASSERT(db->db.db_data == NULL); 1265 dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1266 db->db_state = DB_FILL; 1267 } else if (db->db_state == DB_NOFILL) { 1268 dbuf_clear_data(db); 1269 } else { 1270 ASSERT3U(db->db_state, ==, DB_CACHED); 1271 } 1272 mutex_exit(&db->db_mtx); 1273 } 1274 1275 void 1276 dbuf_unoverride(dbuf_dirty_record_t *dr) 1277 { 1278 dmu_buf_impl_t *db = dr->dr_dbuf; 1279 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1280 uint64_t txg = dr->dr_txg; 1281 1282 ASSERT(MUTEX_HELD(&db->db_mtx)); 1283 /* 1284 * This assert is valid because dmu_sync() expects to be called by 1285 * a zilog's get_data while holding a range lock. This call only 1286 * comes from dbuf_dirty() callers who must also hold a range lock. 
	 */
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t db_search;
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid &&
	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);

	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (zfs_refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_destroy(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	arc_buf_destroy(obuf, db);
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

/*
 * We already have a dirty record for this TXG, and we are being
 * dirtied again.
 */
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this buffer has already been written out,
		 * we now need to reset its state.
		 */
		dbuf_unoverride(dr);
		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
		    db->db_state != DB_NOFILL) {
			/* Already released on initial dirty, so just thaw.
*/ 1502 ASSERT(arc_released(db->db_buf)); 1503 arc_buf_thaw(db->db_buf); 1504 } 1505 } 1506 } 1507 1508 dbuf_dirty_record_t * 1509 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1510 { 1511 dnode_t *dn; 1512 objset_t *os; 1513 dbuf_dirty_record_t **drp, *dr; 1514 int drop_struct_lock = FALSE; 1515 int txgoff = tx->tx_txg & TXG_MASK; 1516 1517 ASSERT(tx->tx_txg != 0); 1518 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1519 DMU_TX_DIRTY_BUF(tx, db); 1520 1521 DB_DNODE_ENTER(db); 1522 dn = DB_DNODE(db); 1523 /* 1524 * Shouldn't dirty a regular buffer in syncing context. Private 1525 * objects may be dirtied in syncing context, but only if they 1526 * were already pre-dirtied in open context. 1527 */ 1528 #ifdef DEBUG 1529 if (dn->dn_objset->os_dsl_dataset != NULL) { 1530 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1531 RW_READER, FTAG); 1532 } 1533 ASSERT(!dmu_tx_is_syncing(tx) || 1534 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 1535 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1536 dn->dn_objset->os_dsl_dataset == NULL); 1537 if (dn->dn_objset->os_dsl_dataset != NULL) 1538 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1539 #endif 1540 /* 1541 * We make this assert for private objects as well, but after we 1542 * check if we're already dirty. They are allowed to re-dirty 1543 * in syncing context. 1544 */ 1545 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1546 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1547 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1548 1549 mutex_enter(&db->db_mtx); 1550 /* 1551 * XXX make this true for indirects too? The problem is that 1552 * transactions created with dmu_tx_create_assigned() from 1553 * syncing context don't bother holding ahead. 1554 */ 1555 ASSERT(db->db_level != 0 || 1556 db->db_state == DB_CACHED || db->db_state == DB_FILL || 1557 db->db_state == DB_NOFILL); 1558 1559 mutex_enter(&dn->dn_mtx); 1560 /* 1561 * Don't set dirtyctx to SYNC if we're just modifying this as we 1562 * initialize the objset. 1563 */ 1564 if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1565 if (dn->dn_objset->os_dsl_dataset != NULL) { 1566 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1567 RW_READER, FTAG); 1568 } 1569 if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1570 dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 1571 DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1572 ASSERT(dn->dn_dirtyctx_firstset == NULL); 1573 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1574 } 1575 if (dn->dn_objset->os_dsl_dataset != NULL) { 1576 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1577 FTAG); 1578 } 1579 } 1580 1581 if (tx->tx_txg > dn->dn_dirty_txg) 1582 dn->dn_dirty_txg = tx->tx_txg; 1583 mutex_exit(&dn->dn_mtx); 1584 1585 if (db->db_blkid == DMU_SPILL_BLKID) 1586 dn->dn_have_spill = B_TRUE; 1587 1588 /* 1589 * If this buffer is already dirty, we're done. 1590 */ 1591 drp = &db->db_last_dirty; 1592 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1593 db->db.db_object == DMU_META_DNODE_OBJECT); 1594 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 1595 drp = &dr->dr_next; 1596 if (dr && dr->dr_txg == tx->tx_txg) { 1597 DB_DNODE_EXIT(db); 1598 1599 dbuf_redirty(dr); 1600 mutex_exit(&db->db_mtx); 1601 return (dr); 1602 } 1603 1604 /* 1605 * Only valid if not already dirty. 1606 */ 1607 ASSERT(dn->dn_object == 0 || 1608 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1609 (dmu_tx_is_syncing(tx) ? 
DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1610 1611 ASSERT3U(dn->dn_nlevels, >, db->db_level); 1612 1613 /* 1614 * We should only be dirtying in syncing context if it's the 1615 * mos or we're initializing the os or it's a special object. 1616 * However, we are allowed to dirty in syncing context provided 1617 * we already dirtied it in open context. Hence we must make 1618 * this assertion only if we're not already dirty. 1619 */ 1620 os = dn->dn_objset; 1621 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1622 #ifdef DEBUG 1623 if (dn->dn_objset->os_dsl_dataset != NULL) 1624 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 1625 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1626 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1627 if (dn->dn_objset->os_dsl_dataset != NULL) 1628 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1629 #endif 1630 ASSERT(db->db.db_size != 0); 1631 1632 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1633 1634 if (db->db_blkid != DMU_BONUS_BLKID) { 1635 dmu_objset_willuse_space(os, db->db.db_size, tx); 1636 } 1637 1638 /* 1639 * If this buffer is dirty in an old transaction group we need 1640 * to make a copy of it so that the changes we make in this 1641 * transaction group won't leak out when we sync the older txg. 1642 */ 1643 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1644 if (db->db_level == 0) { 1645 void *data_old = db->db_buf; 1646 1647 if (db->db_state != DB_NOFILL) { 1648 if (db->db_blkid == DMU_BONUS_BLKID) { 1649 dbuf_fix_old_data(db, tx->tx_txg); 1650 data_old = db->db.db_data; 1651 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1652 /* 1653 * Release the data buffer from the cache so 1654 * that we can modify it without impacting 1655 * possible other users of this cached data 1656 * block. Note that indirect blocks and 1657 * private objects are not released until the 1658 * syncing state (since they are only modified 1659 * then). 1660 */ 1661 arc_release(db->db_buf, db); 1662 dbuf_fix_old_data(db, tx->tx_txg); 1663 data_old = db->db_buf; 1664 } 1665 ASSERT(data_old != NULL); 1666 } 1667 dr->dt.dl.dr_data = data_old; 1668 } else { 1669 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1670 list_create(&dr->dt.di.dr_children, 1671 sizeof (dbuf_dirty_record_t), 1672 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1673 } 1674 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 1675 dr->dr_accounted = db->db.db_size; 1676 dr->dr_dbuf = db; 1677 dr->dr_txg = tx->tx_txg; 1678 dr->dr_next = *drp; 1679 *drp = dr; 1680 1681 /* 1682 * We could have been freed_in_flight between the dbuf_noread 1683 * and dbuf_dirty. We win, as though the dbuf_noread() had 1684 * happened after the free. 
1685 */ 1686 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1687 db->db_blkid != DMU_SPILL_BLKID) { 1688 mutex_enter(&dn->dn_mtx); 1689 if (dn->dn_free_ranges[txgoff] != NULL) { 1690 range_tree_clear(dn->dn_free_ranges[txgoff], 1691 db->db_blkid, 1); 1692 } 1693 mutex_exit(&dn->dn_mtx); 1694 db->db_freed_in_flight = FALSE; 1695 } 1696 1697 /* 1698 * This buffer is now part of this txg 1699 */ 1700 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1701 db->db_dirtycnt += 1; 1702 ASSERT3U(db->db_dirtycnt, <=, 3); 1703 1704 mutex_exit(&db->db_mtx); 1705 1706 if (db->db_blkid == DMU_BONUS_BLKID || 1707 db->db_blkid == DMU_SPILL_BLKID) { 1708 mutex_enter(&dn->dn_mtx); 1709 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1710 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1711 mutex_exit(&dn->dn_mtx); 1712 dnode_setdirty(dn, tx); 1713 DB_DNODE_EXIT(db); 1714 return (dr); 1715 } 1716 1717 /* 1718 * The dn_struct_rwlock prevents db_blkptr from changing 1719 * due to a write from syncing context completing 1720 * while we are running, so we want to acquire it before 1721 * looking at db_blkptr. 1722 */ 1723 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 1724 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1725 drop_struct_lock = TRUE; 1726 } 1727 1728 /* 1729 * We need to hold the dn_struct_rwlock to make this assertion, 1730 * because it protects dn_phys / dn_next_nlevels from changing. 1731 */ 1732 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1733 dn->dn_phys->dn_nlevels > db->db_level || 1734 dn->dn_next_nlevels[txgoff] > db->db_level || 1735 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1736 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1737 1738 /* 1739 * If we are overwriting a dedup BP, then unless it is snapshotted, 1740 * when we get to syncing context we will need to decrement its 1741 * refcount in the DDT. Prefetch the relevant DDT block so that 1742 * syncing context won't have to wait for the i/o. 1743 */ 1744 ddt_prefetch(os->os_spa, db->db_blkptr); 1745 1746 if (db->db_level == 0) { 1747 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 1748 ASSERT(dn->dn_maxblkid >= db->db_blkid); 1749 } 1750 1751 if (db->db_level+1 < dn->dn_nlevels) { 1752 dmu_buf_impl_t *parent = db->db_parent; 1753 dbuf_dirty_record_t *di; 1754 int parent_held = FALSE; 1755 1756 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1757 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1758 1759 parent = dbuf_hold_level(dn, db->db_level+1, 1760 db->db_blkid >> epbs, FTAG); 1761 ASSERT(parent != NULL); 1762 parent_held = TRUE; 1763 } 1764 if (drop_struct_lock) 1765 rw_exit(&dn->dn_struct_rwlock); 1766 ASSERT3U(db->db_level+1, ==, parent->db_level); 1767 di = dbuf_dirty(parent, tx); 1768 if (parent_held) 1769 dbuf_rele(parent, FTAG); 1770 1771 mutex_enter(&db->db_mtx); 1772 /* 1773 * Since we've dropped the mutex, it's possible that 1774 * dbuf_undirty() might have changed this out from under us. 
1775 */ 1776 if (db->db_last_dirty == dr || 1777 dn->dn_object == DMU_META_DNODE_OBJECT) { 1778 mutex_enter(&di->dt.di.dr_mtx); 1779 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1780 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1781 list_insert_tail(&di->dt.di.dr_children, dr); 1782 mutex_exit(&di->dt.di.dr_mtx); 1783 dr->dr_parent = di; 1784 } 1785 mutex_exit(&db->db_mtx); 1786 } else { 1787 ASSERT(db->db_level+1 == dn->dn_nlevels); 1788 ASSERT(db->db_blkid < dn->dn_nblkptr); 1789 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1790 mutex_enter(&dn->dn_mtx); 1791 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1792 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1793 mutex_exit(&dn->dn_mtx); 1794 if (drop_struct_lock) 1795 rw_exit(&dn->dn_struct_rwlock); 1796 } 1797 1798 dnode_setdirty(dn, tx); 1799 DB_DNODE_EXIT(db); 1800 return (dr); 1801 } 1802 1803 /* 1804 * Undirty a buffer in the transaction group referenced by the given 1805 * transaction. Return whether this evicted the dbuf. 1806 */ 1807 static boolean_t 1808 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1809 { 1810 dnode_t *dn; 1811 uint64_t txg = tx->tx_txg; 1812 dbuf_dirty_record_t *dr, **drp; 1813 1814 ASSERT(txg != 0); 1815 1816 /* 1817 * Due to our use of dn_nlevels below, this can only be called 1818 * in open context, unless we are operating on the MOS. 1819 * From syncing context, dn_nlevels may be different from the 1820 * dn_nlevels used when dbuf was dirtied. 1821 */ 1822 ASSERT(db->db_objset == 1823 dmu_objset_pool(db->db_objset)->dp_meta_objset || 1824 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1825 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1826 ASSERT0(db->db_level); 1827 ASSERT(MUTEX_HELD(&db->db_mtx)); 1828 1829 /* 1830 * If this buffer is not dirty, we're done. 1831 */ 1832 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1833 if (dr->dr_txg <= txg) 1834 break; 1835 if (dr == NULL || dr->dr_txg < txg) 1836 return (B_FALSE); 1837 ASSERT(dr->dr_txg == txg); 1838 ASSERT(dr->dr_dbuf == db); 1839 1840 DB_DNODE_ENTER(db); 1841 dn = DB_DNODE(db); 1842 1843 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1844 1845 ASSERT(db->db.db_size != 0); 1846 1847 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 1848 dr->dr_accounted, txg); 1849 1850 *drp = dr->dr_next; 1851 1852 /* 1853 * Note that there are three places in dbuf_dirty() 1854 * where this dirty record may be put on a list. 1855 * Make sure to do a list_remove corresponding to 1856 * every one of those list_insert calls. 
1857 */ 1858 if (dr->dr_parent) { 1859 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1860 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1861 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1862 } else if (db->db_blkid == DMU_SPILL_BLKID || 1863 db->db_level + 1 == dn->dn_nlevels) { 1864 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1865 mutex_enter(&dn->dn_mtx); 1866 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1867 mutex_exit(&dn->dn_mtx); 1868 } 1869 DB_DNODE_EXIT(db); 1870 1871 if (db->db_state != DB_NOFILL) { 1872 dbuf_unoverride(dr); 1873 1874 ASSERT(db->db_buf != NULL); 1875 ASSERT(dr->dt.dl.dr_data != NULL); 1876 if (dr->dt.dl.dr_data != db->db_buf) 1877 arc_buf_destroy(dr->dt.dl.dr_data, db); 1878 } 1879 1880 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1881 1882 ASSERT(db->db_dirtycnt > 0); 1883 db->db_dirtycnt -= 1; 1884 1885 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1886 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1887 dbuf_destroy(db); 1888 return (B_TRUE); 1889 } 1890 1891 return (B_FALSE); 1892 } 1893 1894 void 1895 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1896 { 1897 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1898 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1899 1900 ASSERT(tx->tx_txg != 0); 1901 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1902 1903 /* 1904 * Quick check for dirtyness. For already dirty blocks, this 1905 * reduces runtime of this function by >90%, and overall performance 1906 * by 50% for some workloads (e.g. file deletion with indirect blocks 1907 * cached). 1908 */ 1909 mutex_enter(&db->db_mtx); 1910 dbuf_dirty_record_t *dr; 1911 for (dr = db->db_last_dirty; 1912 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 1913 /* 1914 * It's possible that it is already dirty but not cached, 1915 * because there are some calls to dbuf_dirty() that don't 1916 * go through dmu_buf_will_dirty(). 1917 */ 1918 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 1919 /* This dbuf is already dirty and cached. */ 1920 dbuf_redirty(dr); 1921 mutex_exit(&db->db_mtx); 1922 return; 1923 } 1924 } 1925 mutex_exit(&db->db_mtx); 1926 1927 DB_DNODE_ENTER(db); 1928 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1929 rf |= DB_RF_HAVESTRUCT; 1930 DB_DNODE_EXIT(db); 1931 (void) dbuf_read(db, NULL, rf); 1932 (void) dbuf_dirty(db, tx); 1933 } 1934 1935 void 1936 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1937 { 1938 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1939 1940 db->db_state = DB_NOFILL; 1941 1942 dmu_buf_will_fill(db_fake, tx); 1943 } 1944 1945 void 1946 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1947 { 1948 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1949 1950 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1951 ASSERT(tx->tx_txg != 0); 1952 ASSERT(db->db_level == 0); 1953 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1954 1955 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1956 dmu_tx_private_ok(tx)); 1957 1958 dbuf_noread(db); 1959 (void) dbuf_dirty(db, tx); 1960 } 1961 1962 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1963 /* ARGSUSED */ 1964 void 1965 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1966 { 1967 mutex_enter(&db->db_mtx); 1968 DBUF_VERIFY(db); 1969 1970 if (db->db_state == DB_FILL) { 1971 if (db->db_level == 0 && db->db_freed_in_flight) { 1972 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1973 /* we were freed while filling */ 1974 /* XXX dbuf_undirty? 
*/ 1975 bzero(db->db.db_data, db->db.db_size); 1976 db->db_freed_in_flight = FALSE; 1977 } 1978 db->db_state = DB_CACHED; 1979 cv_broadcast(&db->db_changed); 1980 } 1981 mutex_exit(&db->db_mtx); 1982 } 1983 1984 void 1985 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 1986 bp_embedded_type_t etype, enum zio_compress comp, 1987 int uncompressed_size, int compressed_size, int byteorder, 1988 dmu_tx_t *tx) 1989 { 1990 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 1991 struct dirty_leaf *dl; 1992 dmu_object_type_t type; 1993 1994 if (etype == BP_EMBEDDED_TYPE_DATA) { 1995 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1996 SPA_FEATURE_EMBEDDED_DATA)); 1997 } 1998 1999 DB_DNODE_ENTER(db); 2000 type = DB_DNODE(db)->dn_type; 2001 DB_DNODE_EXIT(db); 2002 2003 ASSERT0(db->db_level); 2004 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2005 2006 dmu_buf_will_not_fill(dbuf, tx); 2007 2008 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 2009 dl = &db->db_last_dirty->dt.dl; 2010 encode_embedded_bp_compressed(&dl->dr_overridden_by, 2011 data, comp, uncompressed_size, compressed_size); 2012 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 2013 BP_SET_TYPE(&dl->dr_overridden_by, type); 2014 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 2015 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 2016 2017 dl->dr_override_state = DR_OVERRIDDEN; 2018 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 2019 } 2020 2021 /* 2022 * Directly assign a provided arc buf to a given dbuf if it's not referenced 2023 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 2024 */ 2025 void 2026 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 2027 { 2028 ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2029 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2030 ASSERT(db->db_level == 0); 2031 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 2032 ASSERT(buf != NULL); 2033 ASSERT(arc_buf_lsize(buf) == db->db.db_size); 2034 ASSERT(tx->tx_txg != 0); 2035 2036 arc_return_buf(buf, db); 2037 ASSERT(arc_released(buf)); 2038 2039 mutex_enter(&db->db_mtx); 2040 2041 while (db->db_state == DB_READ || db->db_state == DB_FILL) 2042 cv_wait(&db->db_changed, &db->db_mtx); 2043 2044 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 2045 2046 if (db->db_state == DB_CACHED && 2047 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2048 mutex_exit(&db->db_mtx); 2049 (void) dbuf_dirty(db, tx); 2050 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 2051 arc_buf_destroy(buf, db); 2052 xuio_stat_wbuf_copied(); 2053 return; 2054 } 2055 2056 xuio_stat_wbuf_nocopy(); 2057 if (db->db_state == DB_CACHED) { 2058 dbuf_dirty_record_t *dr = db->db_last_dirty; 2059 2060 ASSERT(db->db_buf != NULL); 2061 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 2062 ASSERT(dr->dt.dl.dr_data == db->db_buf); 2063 if (!arc_released(db->db_buf)) { 2064 ASSERT(dr->dt.dl.dr_override_state == 2065 DR_OVERRIDDEN); 2066 arc_release(db->db_buf, db); 2067 } 2068 dr->dt.dl.dr_data = buf; 2069 arc_buf_destroy(db->db_buf, db); 2070 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2071 arc_release(db->db_buf, db); 2072 arc_buf_destroy(db->db_buf, db); 2073 } 2074 db->db_buf = NULL; 2075 } 2076 ASSERT(db->db_buf == NULL); 2077 dbuf_set_data(db, buf); 2078 db->db_state = DB_FILL; 2079 mutex_exit(&db->db_mtx); 2080 (void) dbuf_dirty(db, tx); 2081 dmu_buf_fill_done(&db->db, tx); 2082 } 2083 2084 void 2085 dbuf_destroy(dmu_buf_impl_t *db) 2086 { 2087 dnode_t *dn; 2088 dmu_buf_impl_t *parent = 
db->db_parent; 2089 dmu_buf_impl_t *dndb; 2090 2091 ASSERT(MUTEX_HELD(&db->db_mtx)); 2092 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2093 2094 if (db->db_buf != NULL) { 2095 arc_buf_destroy(db->db_buf, db); 2096 db->db_buf = NULL; 2097 } 2098 2099 if (db->db_blkid == DMU_BONUS_BLKID) { 2100 int slots = DB_DNODE(db)->dn_num_slots; 2101 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 2102 if (db->db.db_data != NULL) { 2103 zio_buf_free(db->db.db_data, bonuslen); 2104 arc_space_return(bonuslen, ARC_SPACE_BONUS); 2105 db->db_state = DB_UNCACHED; 2106 } 2107 } 2108 2109 dbuf_clear_data(db); 2110 2111 if (multilist_link_active(&db->db_cache_link)) { 2112 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2113 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2114 2115 multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2116 (void) zfs_refcount_remove_many( 2117 &dbuf_caches[db->db_caching_status].size, 2118 db->db.db_size, db); 2119 2120 db->db_caching_status = DB_NO_CACHE; 2121 } 2122 2123 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2124 ASSERT(db->db_data_pending == NULL); 2125 2126 db->db_state = DB_EVICTING; 2127 db->db_blkptr = NULL; 2128 2129 /* 2130 * Now that db_state is DB_EVICTING, nobody else can find this via 2131 * the hash table. We can now drop db_mtx, which allows us to 2132 * acquire the dn_dbufs_mtx. 2133 */ 2134 mutex_exit(&db->db_mtx); 2135 2136 DB_DNODE_ENTER(db); 2137 dn = DB_DNODE(db); 2138 dndb = dn->dn_dbuf; 2139 if (db->db_blkid != DMU_BONUS_BLKID) { 2140 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2141 if (needlock) 2142 mutex_enter(&dn->dn_dbufs_mtx); 2143 avl_remove(&dn->dn_dbufs, db); 2144 atomic_dec_32(&dn->dn_dbufs_count); 2145 membar_producer(); 2146 DB_DNODE_EXIT(db); 2147 if (needlock) 2148 mutex_exit(&dn->dn_dbufs_mtx); 2149 /* 2150 * Decrementing the dbuf count means that the hold corresponding 2151 * to the removed dbuf is no longer discounted in dnode_move(), 2152 * so the dnode cannot be moved until after we release the hold. 2153 * The membar_producer() ensures visibility of the decremented 2154 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2155 * release any lock. 2156 */ 2157 mutex_enter(&dn->dn_mtx); 2158 dnode_rele_and_unlock(dn, db, B_TRUE); 2159 db->db_dnode_handle = NULL; 2160 2161 dbuf_hash_remove(db); 2162 } else { 2163 DB_DNODE_EXIT(db); 2164 } 2165 2166 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2167 2168 db->db_parent = NULL; 2169 2170 ASSERT(db->db_buf == NULL); 2171 ASSERT(db->db.db_data == NULL); 2172 ASSERT(db->db_hash_next == NULL); 2173 ASSERT(db->db_blkptr == NULL); 2174 ASSERT(db->db_data_pending == NULL); 2175 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2176 ASSERT(!multilist_link_active(&db->db_cache_link)); 2177 2178 kmem_cache_free(dbuf_kmem_cache, db); 2179 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2180 2181 /* 2182 * If this dbuf is referenced from an indirect dbuf, 2183 * decrement the ref count on the indirect dbuf. 2184 */ 2185 if (parent && parent != dndb) { 2186 mutex_enter(&parent->db_mtx); 2187 dbuf_rele_and_unlock(parent, db, B_TRUE); 2188 } 2189 } 2190 2191 /* 2192 * Note: While bpp will always be updated if the function returns success, 2193 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2194 * this happens when the dnode is the meta-dnode, or a userused or groupused 2195 * object. 
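 * Callers that do receive a parent dbuf through *parentp are responsible
 * for dropping that hold with dbuf_rele() when they are done with it.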
2196 */ 2197 static int 2198 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2199 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2200 { 2201 *parentp = NULL; 2202 *bpp = NULL; 2203 2204 ASSERT(blkid != DMU_BONUS_BLKID); 2205 2206 if (blkid == DMU_SPILL_BLKID) { 2207 mutex_enter(&dn->dn_mtx); 2208 if (dn->dn_have_spill && 2209 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2210 *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 2211 else 2212 *bpp = NULL; 2213 dbuf_add_ref(dn->dn_dbuf, NULL); 2214 *parentp = dn->dn_dbuf; 2215 mutex_exit(&dn->dn_mtx); 2216 return (0); 2217 } 2218 2219 int nlevels = 2220 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 2221 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2222 2223 ASSERT3U(level * epbs, <, 64); 2224 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2225 /* 2226 * This assertion shouldn't trip as long as the max indirect block size 2227 * is less than 1M. The reason for this is that up to that point, 2228 * the number of levels required to address an entire object with blocks 2229 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 2230 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 2231 * (i.e. we can address the entire object), objects will all use at most 2232 * N-1 levels and the assertion won't overflow. However, once epbs is 2233 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 2234 * enough to address an entire object, so objects will have 5 levels, 2235 * but then this assertion will overflow. 2236 * 2237 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 2238 * need to redo this logic to handle overflows. 2239 */ 2240 ASSERT(level >= nlevels || 2241 ((nlevels - level - 1) * epbs) + 2242 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2243 if (level >= nlevels || 2244 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 2245 ((nlevels - level - 1) * epbs)) || 2246 (fail_sparse && 2247 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2248 /* the buffer has no parent yet */ 2249 return (SET_ERROR(ENOENT)); 2250 } else if (level < nlevels-1) { 2251 /* this block is referenced from an indirect block */ 2252 int err = dbuf_hold_impl(dn, level+1, 2253 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2254 if (err) 2255 return (err); 2256 err = dbuf_read(*parentp, NULL, 2257 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2258 if (err) { 2259 dbuf_rele(*parentp, NULL); 2260 *parentp = NULL; 2261 return (err); 2262 } 2263 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2264 (blkid & ((1ULL << epbs) - 1)); 2265 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 2266 ASSERT(BP_IS_HOLE(*bpp)); 2267 return (0); 2268 } else { 2269 /* the block is referenced from the dnode */ 2270 ASSERT3U(level, ==, nlevels-1); 2271 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2272 blkid < dn->dn_phys->dn_nblkptr); 2273 if (dn->dn_dbuf) { 2274 dbuf_add_ref(dn->dn_dbuf, NULL); 2275 *parentp = dn->dn_dbuf; 2276 } 2277 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2278 return (0); 2279 } 2280 } 2281 2282 static dmu_buf_impl_t * 2283 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2284 dmu_buf_impl_t *parent, blkptr_t *blkptr) 2285 { 2286 objset_t *os = dn->dn_objset; 2287 dmu_buf_impl_t *db, *odb; 2288 2289 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2290 ASSERT(dn->dn_type != DMU_OT_NONE); 2291 2292 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2293 2294 db->db_objset = os; 2295 db->db.db_object = dn->dn_object; 2296 db->db_level = level; 2297 db->db_blkid = blkid; 
2298 db->db_last_dirty = NULL; 2299 db->db_dirtycnt = 0; 2300 db->db_dnode_handle = dn->dn_handle; 2301 db->db_parent = parent; 2302 db->db_blkptr = blkptr; 2303 2304 db->db_user = NULL; 2305 db->db_user_immediate_evict = FALSE; 2306 db->db_freed_in_flight = FALSE; 2307 db->db_pending_evict = FALSE; 2308 2309 if (blkid == DMU_BONUS_BLKID) { 2310 ASSERT3P(parent, ==, dn->dn_dbuf); 2311 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 2312 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 2313 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 2314 db->db.db_offset = DMU_BONUS_BLKID; 2315 db->db_state = DB_UNCACHED; 2316 db->db_caching_status = DB_NO_CACHE; 2317 /* the bonus dbuf is not placed in the hash table */ 2318 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2319 return (db); 2320 } else if (blkid == DMU_SPILL_BLKID) { 2321 db->db.db_size = (blkptr != NULL) ? 2322 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 2323 db->db.db_offset = 0; 2324 } else { 2325 int blocksize = 2326 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2327 db->db.db_size = blocksize; 2328 db->db.db_offset = db->db_blkid * blocksize; 2329 } 2330 2331 /* 2332 * Hold the dn_dbufs_mtx while we get the new dbuf 2333 * in the hash table *and* added to the dbufs list. 2334 * This prevents a possible deadlock with someone 2335 * trying to look up this dbuf before its added to the 2336 * dn_dbufs list. 2337 */ 2338 mutex_enter(&dn->dn_dbufs_mtx); 2339 db->db_state = DB_EVICTING; 2340 if ((odb = dbuf_hash_insert(db)) != NULL) { 2341 /* someone else inserted it first */ 2342 kmem_cache_free(dbuf_kmem_cache, db); 2343 mutex_exit(&dn->dn_dbufs_mtx); 2344 return (odb); 2345 } 2346 avl_add(&dn->dn_dbufs, db); 2347 2348 db->db_state = DB_UNCACHED; 2349 db->db_caching_status = DB_NO_CACHE; 2350 mutex_exit(&dn->dn_dbufs_mtx); 2351 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2352 2353 if (parent && parent != dn->dn_dbuf) 2354 dbuf_add_ref(parent, db); 2355 2356 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2357 zfs_refcount_count(&dn->dn_holds) > 0); 2358 (void) zfs_refcount_add(&dn->dn_holds, db); 2359 atomic_inc_32(&dn->dn_dbufs_count); 2360 2361 dprintf_dbuf(db, "db=%p\n", db); 2362 2363 return (db); 2364 } 2365 2366 typedef struct dbuf_prefetch_arg { 2367 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2368 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2369 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2370 int dpa_curlevel; /* The current level that we're reading */ 2371 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2372 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2373 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2374 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2375 } dbuf_prefetch_arg_t; 2376 2377 /* 2378 * Actually issue the prefetch read for the block given. 
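 * Holes and embedded block pointers are skipped; there is nothing on disk
 * to read for them.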
2379 */ 2380 static void 2381 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2382 { 2383 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2384 return; 2385 2386 arc_flags_t aflags = 2387 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2388 2389 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2390 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2391 ASSERT(dpa->dpa_zio != NULL); 2392 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2393 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2394 &aflags, &dpa->dpa_zb); 2395 } 2396 2397 /* 2398 * Called when an indirect block above our prefetch target is read in. This 2399 * will either read in the next indirect block down the tree or issue the actual 2400 * prefetch if the next block down is our target. 2401 */ 2402 static void 2403 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 2404 const blkptr_t *iobp, arc_buf_t *abuf, void *private) 2405 { 2406 dbuf_prefetch_arg_t *dpa = private; 2407 2408 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2409 ASSERT3S(dpa->dpa_curlevel, >, 0); 2410 2411 if (abuf == NULL) { 2412 ASSERT(zio == NULL || zio->io_error != 0); 2413 kmem_free(dpa, sizeof (*dpa)); 2414 return; 2415 } 2416 ASSERT(zio == NULL || zio->io_error == 0); 2417 2418 /* 2419 * The dpa_dnode is only valid if we are called with a NULL 2420 * zio. This indicates that the arc_read() returned without 2421 * first calling zio_read() to issue a physical read. Once 2422 * a physical read is made the dpa_dnode must be invalidated 2423 * as the locks guarding it may have been dropped. If the 2424 * dpa_dnode is still valid, then we want to add it to the dbuf 2425 * cache. To do so, we must hold the dbuf associated with the block 2426 * we just prefetched, read its contents so that we associate it 2427 * with an arc_buf_t, and then release it. 
2428 */ 2429 if (zio != NULL) { 2430 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2431 if (zio->io_flags & ZIO_FLAG_RAW) { 2432 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2433 } else { 2434 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2435 } 2436 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2437 2438 dpa->dpa_dnode = NULL; 2439 } else if (dpa->dpa_dnode != NULL) { 2440 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2441 (dpa->dpa_epbs * (dpa->dpa_curlevel - 2442 dpa->dpa_zb.zb_level)); 2443 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2444 dpa->dpa_curlevel, curblkid, FTAG); 2445 (void) dbuf_read(db, NULL, 2446 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2447 dbuf_rele(db, FTAG); 2448 } 2449 2450 dpa->dpa_curlevel--; 2451 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2452 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2453 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2454 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2455 2456 if (BP_IS_HOLE(bp)) { 2457 kmem_free(dpa, sizeof (*dpa)); 2458 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2459 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2460 dbuf_issue_final_prefetch(dpa, bp); 2461 kmem_free(dpa, sizeof (*dpa)); 2462 } else { 2463 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2464 zbookmark_phys_t zb; 2465 2466 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2467 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 2468 iter_aflags |= ARC_FLAG_L2CACHE; 2469 2470 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2471 2472 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2473 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2474 2475 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2476 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2477 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2478 &iter_aflags, &zb); 2479 } 2480 2481 arc_buf_destroy(abuf, private); 2482 } 2483 2484 /* 2485 * Issue prefetch reads for the given block on the given level. If the indirect 2486 * blocks above that block are not in memory, we will read them in 2487 * asynchronously. As a result, this call never blocks waiting for a read to 2488 * complete. 2489 */ 2490 void 2491 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2492 arc_flags_t aflags) 2493 { 2494 blkptr_t bp; 2495 int epbs, nlevels, curlevel; 2496 uint64_t curblkid; 2497 2498 ASSERT(blkid != DMU_BONUS_BLKID); 2499 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2500 2501 if (blkid > dn->dn_maxblkid) 2502 return; 2503 2504 if (dnode_block_freed(dn, blkid)) 2505 return; 2506 2507 /* 2508 * This dnode hasn't been written to disk yet, so there's nothing to 2509 * prefetch. 2510 */ 2511 nlevels = dn->dn_phys->dn_nlevels; 2512 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2513 return; 2514 2515 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2516 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2517 return; 2518 2519 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2520 level, blkid); 2521 if (db != NULL) { 2522 mutex_exit(&db->db_mtx); 2523 /* 2524 * This dbuf already exists. It is either CACHED, or 2525 * (we assume) about to be read or filled. 2526 */ 2527 return; 2528 } 2529 2530 /* 2531 * Find the closest ancestor (indirect block) of the target block 2532 * that is present in the cache. In this indirect block, we will 2533 * find the bp that is at curlevel, curblkid. 
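	 * For example (assuming 16K indirect blocks, i.e. epbs == 7), a
	 * prefetch of the level-0 block with blkid 300 first looks for the
	 * level-1 indirect at blkid 300 >> 7 == 2, then for the level-2
	 * indirect at blkid 2 >> 7 == 0, and finally falls back to the
	 * dnode's own block pointers if none of those are cached.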
2534 */ 2535 curlevel = level; 2536 curblkid = blkid; 2537 while (curlevel < nlevels - 1) { 2538 int parent_level = curlevel + 1; 2539 uint64_t parent_blkid = curblkid >> epbs; 2540 dmu_buf_impl_t *db; 2541 2542 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2543 FALSE, TRUE, FTAG, &db) == 0) { 2544 blkptr_t *bpp = db->db_buf->b_data; 2545 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2546 dbuf_rele(db, FTAG); 2547 break; 2548 } 2549 2550 curlevel = parent_level; 2551 curblkid = parent_blkid; 2552 } 2553 2554 if (curlevel == nlevels - 1) { 2555 /* No cached indirect blocks found. */ 2556 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2557 bp = dn->dn_phys->dn_blkptr[curblkid]; 2558 } 2559 if (BP_IS_HOLE(&bp)) 2560 return; 2561 2562 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2563 2564 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2565 ZIO_FLAG_CANFAIL); 2566 2567 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2568 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2569 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2570 dn->dn_object, level, blkid); 2571 dpa->dpa_curlevel = curlevel; 2572 dpa->dpa_prio = prio; 2573 dpa->dpa_aflags = aflags; 2574 dpa->dpa_spa = dn->dn_objset->os_spa; 2575 dpa->dpa_dnode = dn; 2576 dpa->dpa_epbs = epbs; 2577 dpa->dpa_zio = pio; 2578 2579 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2580 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2581 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 2582 2583 /* 2584 * If we have the indirect just above us, no need to do the asynchronous 2585 * prefetch chain; we'll just run the last step ourselves. If we're at 2586 * a higher level, though, we want to issue the prefetches for all the 2587 * indirect blocks asynchronously, so we can go on with whatever we were 2588 * doing. 2589 */ 2590 if (curlevel == level) { 2591 ASSERT3U(curblkid, ==, blkid); 2592 dbuf_issue_final_prefetch(dpa, &bp); 2593 kmem_free(dpa, sizeof (*dpa)); 2594 } else { 2595 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2596 zbookmark_phys_t zb; 2597 2598 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2599 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2600 iter_aflags |= ARC_FLAG_L2CACHE; 2601 2602 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2603 dn->dn_object, curlevel, curblkid); 2604 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2605 &bp, dbuf_prefetch_indirect_done, dpa, prio, 2606 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2607 &iter_aflags, &zb); 2608 } 2609 /* 2610 * We use pio here instead of dpa_zio since it's possible that 2611 * dpa may have already been freed. 2612 */ 2613 zio_nowait(pio); 2614 } 2615 2616 /* 2617 * Returns with db_holds incremented, and db_mtx not held. 2618 * Note: dn_struct_rwlock must be held. 
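 *
 * Illustrative sketch of a typical caller (this mirrors dbuf_hold_level()
 * below; the level, tag and error handling are the caller's choice):
 *
 *	dmu_buf_impl_t *db;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0) {
 *		... dbuf_read() and use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}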
2619 */ 2620 int 2621 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2622 boolean_t fail_sparse, boolean_t fail_uncached, 2623 void *tag, dmu_buf_impl_t **dbp) 2624 { 2625 dmu_buf_impl_t *db, *parent = NULL; 2626 2627 ASSERT(blkid != DMU_BONUS_BLKID); 2628 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2629 ASSERT3U(dn->dn_nlevels, >, level); 2630 2631 *dbp = NULL; 2632 top: 2633 /* dbuf_find() returns with db_mtx held */ 2634 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2635 2636 if (db == NULL) { 2637 blkptr_t *bp = NULL; 2638 int err; 2639 2640 if (fail_uncached) 2641 return (SET_ERROR(ENOENT)); 2642 2643 ASSERT3P(parent, ==, NULL); 2644 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2645 if (fail_sparse) { 2646 if (err == 0 && bp && BP_IS_HOLE(bp)) 2647 err = SET_ERROR(ENOENT); 2648 if (err) { 2649 if (parent) 2650 dbuf_rele(parent, NULL); 2651 return (err); 2652 } 2653 } 2654 if (err && err != ENOENT) 2655 return (err); 2656 db = dbuf_create(dn, level, blkid, parent, bp); 2657 } 2658 2659 if (fail_uncached && db->db_state != DB_CACHED) { 2660 mutex_exit(&db->db_mtx); 2661 return (SET_ERROR(ENOENT)); 2662 } 2663 2664 if (db->db_buf != NULL) { 2665 arc_buf_access(db->db_buf); 2666 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2667 } 2668 2669 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2670 2671 /* 2672 * If this buffer is currently syncing out, and we are are 2673 * still referencing it from db_data, we need to make a copy 2674 * of it in case we decide we want to dirty it again in this txg. 2675 */ 2676 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2677 dn->dn_object != DMU_META_DNODE_OBJECT && 2678 db->db_state == DB_CACHED && db->db_data_pending) { 2679 dbuf_dirty_record_t *dr = db->db_data_pending; 2680 2681 if (dr->dt.dl.dr_data == db->db_buf) { 2682 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2683 2684 dbuf_set_data(db, 2685 arc_alloc_buf(dn->dn_objset->os_spa, db, type, 2686 db->db.db_size)); 2687 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2688 db->db.db_size); 2689 } 2690 } 2691 2692 if (multilist_link_active(&db->db_cache_link)) { 2693 ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2694 ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2695 db->db_caching_status == DB_DBUF_METADATA_CACHE); 2696 2697 multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2698 (void) zfs_refcount_remove_many( 2699 &dbuf_caches[db->db_caching_status].size, 2700 db->db.db_size, db); 2701 2702 db->db_caching_status = DB_NO_CACHE; 2703 } 2704 (void) zfs_refcount_add(&db->db_holds, tag); 2705 DBUF_VERIFY(db); 2706 mutex_exit(&db->db_mtx); 2707 2708 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2709 if (parent) 2710 dbuf_rele(parent, NULL); 2711 2712 ASSERT3P(DB_DNODE(db), ==, dn); 2713 ASSERT3U(db->db_blkid, ==, blkid); 2714 ASSERT3U(db->db_level, ==, level); 2715 *dbp = db; 2716 2717 return (0); 2718 } 2719 2720 dmu_buf_impl_t * 2721 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2722 { 2723 return (dbuf_hold_level(dn, 0, blkid, tag)); 2724 } 2725 2726 dmu_buf_impl_t * 2727 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2728 { 2729 dmu_buf_impl_t *db; 2730 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2731 return (err ? 
NULL : db); 2732 } 2733 2734 void 2735 dbuf_create_bonus(dnode_t *dn) 2736 { 2737 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2738 2739 ASSERT(dn->dn_bonus == NULL); 2740 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 2741 } 2742 2743 int 2744 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 2745 { 2746 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2747 dnode_t *dn; 2748 2749 if (db->db_blkid != DMU_SPILL_BLKID) 2750 return (SET_ERROR(ENOTSUP)); 2751 if (blksz == 0) 2752 blksz = SPA_MINBLOCKSIZE; 2753 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2754 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 2755 2756 DB_DNODE_ENTER(db); 2757 dn = DB_DNODE(db); 2758 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2759 dbuf_new_size(db, blksz, tx); 2760 rw_exit(&dn->dn_struct_rwlock); 2761 DB_DNODE_EXIT(db); 2762 2763 return (0); 2764 } 2765 2766 void 2767 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 2768 { 2769 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2770 } 2771 2772 #pragma weak dmu_buf_add_ref = dbuf_add_ref 2773 void 2774 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2775 { 2776 int64_t holds = zfs_refcount_add(&db->db_holds, tag); 2777 ASSERT3S(holds, >, 1); 2778 } 2779 2780 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2781 boolean_t 2782 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2783 void *tag) 2784 { 2785 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2786 dmu_buf_impl_t *found_db; 2787 boolean_t result = B_FALSE; 2788 2789 if (db->db_blkid == DMU_BONUS_BLKID) 2790 found_db = dbuf_find_bonus(os, obj); 2791 else 2792 found_db = dbuf_find(os, obj, 0, blkid); 2793 2794 if (found_db != NULL) { 2795 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2796 (void) zfs_refcount_add(&db->db_holds, tag); 2797 result = B_TRUE; 2798 } 2799 mutex_exit(&db->db_mtx); 2800 } 2801 return (result); 2802 } 2803 2804 /* 2805 * If you call dbuf_rele() you had better not be referencing the dnode handle 2806 * unless you have some other direct or indirect hold on the dnode. (An indirect 2807 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2808 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2809 * dnode's parent dbuf evicting its dnode handles. 2810 */ 2811 void 2812 dbuf_rele(dmu_buf_impl_t *db, void *tag) 2813 { 2814 mutex_enter(&db->db_mtx); 2815 dbuf_rele_and_unlock(db, tag, B_FALSE); 2816 } 2817 2818 void 2819 dmu_buf_rele(dmu_buf_t *db, void *tag) 2820 { 2821 dbuf_rele((dmu_buf_impl_t *)db, tag); 2822 } 2823 2824 /* 2825 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2826 * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 2827 * argument should be set if we are already in the dbuf-evicting code 2828 * path, in which case we don't want to recursively evict. 
This allows us to 2829 * avoid deeply nested stacks that would have a call flow similar to this: 2830 * 2831 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 2832 * ^ | 2833 * | | 2834 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 2835 * 2836 */ 2837 void 2838 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting) 2839 { 2840 int64_t holds; 2841 2842 ASSERT(MUTEX_HELD(&db->db_mtx)); 2843 DBUF_VERIFY(db); 2844 2845 /* 2846 * Remove the reference to the dbuf before removing its hold on the 2847 * dnode so we can guarantee in dnode_move() that a referenced bonus 2848 * buffer has a corresponding dnode hold. 2849 */ 2850 holds = zfs_refcount_remove(&db->db_holds, tag); 2851 ASSERT(holds >= 0); 2852 2853 /* 2854 * We can't freeze indirects if there is a possibility that they 2855 * may be modified in the current syncing context. 2856 */ 2857 if (db->db_buf != NULL && 2858 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 2859 arc_buf_freeze(db->db_buf); 2860 } 2861 2862 if (holds == db->db_dirtycnt && 2863 db->db_level == 0 && db->db_user_immediate_evict) 2864 dbuf_evict_user(db); 2865 2866 if (holds == 0) { 2867 if (db->db_blkid == DMU_BONUS_BLKID) { 2868 dnode_t *dn; 2869 boolean_t evict_dbuf = db->db_pending_evict; 2870 2871 /* 2872 * If the dnode moves here, we cannot cross this 2873 * barrier until the move completes. 2874 */ 2875 DB_DNODE_ENTER(db); 2876 2877 dn = DB_DNODE(db); 2878 atomic_dec_32(&dn->dn_dbufs_count); 2879 2880 /* 2881 * Decrementing the dbuf count means that the bonus 2882 * buffer's dnode hold is no longer discounted in 2883 * dnode_move(). The dnode cannot move until after 2884 * the dnode_rele() below. 2885 */ 2886 DB_DNODE_EXIT(db); 2887 2888 /* 2889 * Do not reference db after its lock is dropped. 2890 * Another thread may evict it. 2891 */ 2892 mutex_exit(&db->db_mtx); 2893 2894 if (evict_dbuf) 2895 dnode_evict_bonus(dn); 2896 2897 dnode_rele(dn, db); 2898 } else if (db->db_buf == NULL) { 2899 /* 2900 * This is a special case: we never associated this 2901 * dbuf with any data allocated from the ARC. 2902 */ 2903 ASSERT(db->db_state == DB_UNCACHED || 2904 db->db_state == DB_NOFILL); 2905 dbuf_destroy(db); 2906 } else if (arc_released(db->db_buf)) { 2907 /* 2908 * This dbuf has anonymous data associated with it. 2909 */ 2910 dbuf_destroy(db); 2911 } else { 2912 boolean_t do_arc_evict = B_FALSE; 2913 blkptr_t bp; 2914 spa_t *spa = dmu_objset_spa(db->db_objset); 2915 2916 if (!DBUF_IS_CACHEABLE(db) && 2917 db->db_blkptr != NULL && 2918 !BP_IS_HOLE(db->db_blkptr) && 2919 !BP_IS_EMBEDDED(db->db_blkptr)) { 2920 do_arc_evict = B_TRUE; 2921 bp = *db->db_blkptr; 2922 } 2923 2924 if (!DBUF_IS_CACHEABLE(db) || 2925 db->db_pending_evict) { 2926 dbuf_destroy(db); 2927 } else if (!multilist_link_active(&db->db_cache_link)) { 2928 ASSERT3U(db->db_caching_status, ==, 2929 DB_NO_CACHE); 2930 2931 dbuf_cached_state_t dcs = 2932 dbuf_include_in_metadata_cache(db) ? 
2933 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 2934 db->db_caching_status = dcs; 2935 2936 multilist_insert(dbuf_caches[dcs].cache, db); 2937 (void) zfs_refcount_add_many( 2938 &dbuf_caches[dcs].size, db->db.db_size, db); 2939 mutex_exit(&db->db_mtx); 2940 2941 if (db->db_caching_status == DB_DBUF_CACHE && 2942 !evicting) { 2943 dbuf_evict_notify(); 2944 } 2945 } 2946 2947 if (do_arc_evict) 2948 arc_freed(spa, &bp); 2949 } 2950 } else { 2951 mutex_exit(&db->db_mtx); 2952 } 2953 2954 } 2955 2956 #pragma weak dmu_buf_refcount = dbuf_refcount 2957 uint64_t 2958 dbuf_refcount(dmu_buf_impl_t *db) 2959 { 2960 return (zfs_refcount_count(&db->db_holds)); 2961 } 2962 2963 void * 2964 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2965 dmu_buf_user_t *new_user) 2966 { 2967 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2968 2969 mutex_enter(&db->db_mtx); 2970 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2971 if (db->db_user == old_user) 2972 db->db_user = new_user; 2973 else 2974 old_user = db->db_user; 2975 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2976 mutex_exit(&db->db_mtx); 2977 2978 return (old_user); 2979 } 2980 2981 void * 2982 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2983 { 2984 return (dmu_buf_replace_user(db_fake, NULL, user)); 2985 } 2986 2987 void * 2988 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2989 { 2990 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2991 2992 db->db_user_immediate_evict = TRUE; 2993 return (dmu_buf_set_user(db_fake, user)); 2994 } 2995 2996 void * 2997 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2998 { 2999 return (dmu_buf_replace_user(db_fake, user, NULL)); 3000 } 3001 3002 void * 3003 dmu_buf_get_user(dmu_buf_t *db_fake) 3004 { 3005 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3006 3007 dbuf_verify_user(db, DBVU_NOT_EVICTING); 3008 return (db->db_user); 3009 } 3010 3011 void 3012 dmu_buf_user_evict_wait() 3013 { 3014 taskq_wait(dbu_evict_taskq); 3015 } 3016 3017 blkptr_t * 3018 dmu_buf_get_blkptr(dmu_buf_t *db) 3019 { 3020 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3021 return (dbi->db_blkptr); 3022 } 3023 3024 objset_t * 3025 dmu_buf_get_objset(dmu_buf_t *db) 3026 { 3027 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3028 return (dbi->db_objset); 3029 } 3030 3031 dnode_t * 3032 dmu_buf_dnode_enter(dmu_buf_t *db) 3033 { 3034 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3035 DB_DNODE_ENTER(dbi); 3036 return (DB_DNODE(dbi)); 3037 } 3038 3039 void 3040 dmu_buf_dnode_exit(dmu_buf_t *db) 3041 { 3042 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3043 DB_DNODE_EXIT(dbi); 3044 } 3045 3046 static void 3047 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 3048 { 3049 /* ASSERT(dmu_tx_is_syncing(tx) */ 3050 ASSERT(MUTEX_HELD(&db->db_mtx)); 3051 3052 if (db->db_blkptr != NULL) 3053 return; 3054 3055 if (db->db_blkid == DMU_SPILL_BLKID) { 3056 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 3057 BP_ZERO(db->db_blkptr); 3058 return; 3059 } 3060 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 3061 /* 3062 * This buffer was allocated at a time when there was 3063 * no available blkptrs from the dnode, or it was 3064 * inappropriate to hook it in (i.e., nlevels mis-match). 
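		 * Now that this is a top-level block for the dnode's current
		 * on-disk nlevels, hook it directly to the dnode's block
		 * pointer array.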
3065 */ 3066 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 3067 ASSERT(db->db_parent == NULL); 3068 db->db_parent = dn->dn_dbuf; 3069 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 3070 DBUF_VERIFY(db); 3071 } else { 3072 dmu_buf_impl_t *parent = db->db_parent; 3073 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3074 3075 ASSERT(dn->dn_phys->dn_nlevels > 1); 3076 if (parent == NULL) { 3077 mutex_exit(&db->db_mtx); 3078 rw_enter(&dn->dn_struct_rwlock, RW_READER); 3079 parent = dbuf_hold_level(dn, db->db_level + 1, 3080 db->db_blkid >> epbs, db); 3081 rw_exit(&dn->dn_struct_rwlock); 3082 mutex_enter(&db->db_mtx); 3083 db->db_parent = parent; 3084 } 3085 db->db_blkptr = (blkptr_t *)parent->db.db_data + 3086 (db->db_blkid & ((1ULL << epbs) - 1)); 3087 DBUF_VERIFY(db); 3088 } 3089 } 3090 3091 static void 3092 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3093 { 3094 dmu_buf_impl_t *db = dr->dr_dbuf; 3095 dnode_t *dn; 3096 zio_t *zio; 3097 3098 ASSERT(dmu_tx_is_syncing(tx)); 3099 3100 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3101 3102 mutex_enter(&db->db_mtx); 3103 3104 ASSERT(db->db_level > 0); 3105 DBUF_VERIFY(db); 3106 3107 /* Read the block if it hasn't been read yet. */ 3108 if (db->db_buf == NULL) { 3109 mutex_exit(&db->db_mtx); 3110 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 3111 mutex_enter(&db->db_mtx); 3112 } 3113 ASSERT3U(db->db_state, ==, DB_CACHED); 3114 ASSERT(db->db_buf != NULL); 3115 3116 DB_DNODE_ENTER(db); 3117 dn = DB_DNODE(db); 3118 /* Indirect block size must match what the dnode thinks it is. */ 3119 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3120 dbuf_check_blkptr(dn, db); 3121 DB_DNODE_EXIT(db); 3122 3123 /* Provide the pending dirty record to child dbufs */ 3124 db->db_data_pending = dr; 3125 3126 mutex_exit(&db->db_mtx); 3127 3128 dbuf_write(dr, db->db_buf, tx); 3129 3130 zio = dr->dr_zio; 3131 mutex_enter(&dr->dt.di.dr_mtx); 3132 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3133 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3134 mutex_exit(&dr->dt.di.dr_mtx); 3135 zio_nowait(zio); 3136 } 3137 3138 static void 3139 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3140 { 3141 arc_buf_t **datap = &dr->dt.dl.dr_data; 3142 dmu_buf_impl_t *db = dr->dr_dbuf; 3143 dnode_t *dn; 3144 objset_t *os; 3145 uint64_t txg = tx->tx_txg; 3146 3147 ASSERT(dmu_tx_is_syncing(tx)); 3148 3149 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3150 3151 mutex_enter(&db->db_mtx); 3152 /* 3153 * To be synced, we must be dirtied. But we 3154 * might have been freed after the dirty. 3155 */ 3156 if (db->db_state == DB_UNCACHED) { 3157 /* This buffer has been freed since it was dirtied */ 3158 ASSERT(db->db.db_data == NULL); 3159 } else if (db->db_state == DB_FILL) { 3160 /* This buffer was freed and is now being re-filled */ 3161 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3162 } else { 3163 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3164 } 3165 DBUF_VERIFY(db); 3166 3167 DB_DNODE_ENTER(db); 3168 dn = DB_DNODE(db); 3169 3170 if (db->db_blkid == DMU_SPILL_BLKID) { 3171 mutex_enter(&dn->dn_mtx); 3172 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 3173 mutex_exit(&dn->dn_mtx); 3174 } 3175 3176 /* 3177 * If this is a bonus buffer, simply copy the bonus data into the 3178 * dnode. It will be written out when the dnode is synced (and it 3179 * will be synced, since it must have been dirty for dbuf_sync to 3180 * be called). 
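	 * No zio is issued for bonus data, so the dirty record is retired
	 * here rather than in dbuf_write_done().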
3181 */ 3182 if (db->db_blkid == DMU_BONUS_BLKID) { 3183 dbuf_dirty_record_t **drp; 3184 3185 ASSERT(*datap != NULL); 3186 ASSERT0(db->db_level); 3187 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 3188 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 3189 bcopy(*datap, DN_BONUS(dn->dn_phys), 3190 DN_MAX_BONUS_LEN(dn->dn_phys)); 3191 DB_DNODE_EXIT(db); 3192 3193 if (*datap != db->db.db_data) { 3194 int slots = DB_DNODE(db)->dn_num_slots; 3195 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 3196 zio_buf_free(*datap, bonuslen); 3197 arc_space_return(bonuslen, ARC_SPACE_BONUS); 3198 } 3199 db->db_data_pending = NULL; 3200 drp = &db->db_last_dirty; 3201 while (*drp != dr) 3202 drp = &(*drp)->dr_next; 3203 ASSERT(dr->dr_next == NULL); 3204 ASSERT(dr->dr_dbuf == db); 3205 *drp = dr->dr_next; 3206 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3207 ASSERT(db->db_dirtycnt > 0); 3208 db->db_dirtycnt -= 1; 3209 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE); 3210 return; 3211 } 3212 3213 os = dn->dn_objset; 3214 3215 /* 3216 * This function may have dropped the db_mtx lock allowing a dmu_sync 3217 * operation to sneak in. As a result, we need to ensure that we 3218 * don't check the dr_override_state until we have returned from 3219 * dbuf_check_blkptr. 3220 */ 3221 dbuf_check_blkptr(dn, db); 3222 3223 /* 3224 * If this buffer is in the middle of an immediate write, 3225 * wait for the synchronous IO to complete. 3226 */ 3227 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3228 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3229 cv_wait(&db->db_changed, &db->db_mtx); 3230 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3231 } 3232 3233 if (db->db_state != DB_NOFILL && 3234 dn->dn_object != DMU_META_DNODE_OBJECT && 3235 zfs_refcount_count(&db->db_holds) > 1 && 3236 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3237 *datap == db->db_buf) { 3238 /* 3239 * If this buffer is currently "in use" (i.e., there 3240 * are active holds and db_data still references it), 3241 * then make a copy before we start the write so that 3242 * any modifications from the open txg will not leak 3243 * into this write. 3244 * 3245 * NOTE: this copy does not need to be made for 3246 * objects only modified in the syncing context (e.g. 3247 * DNONE_DNODE blocks). 3248 */ 3249 int psize = arc_buf_size(*datap); 3250 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 3251 enum zio_compress compress_type = arc_get_compression(*datap); 3252 3253 if (compress_type == ZIO_COMPRESS_OFF) { 3254 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 3255 } else { 3256 ASSERT3U(type, ==, ARC_BUFC_DATA); 3257 int lsize = arc_buf_lsize(*datap); 3258 *datap = arc_alloc_compressed_buf(os->os_spa, db, 3259 psize, lsize, compress_type); 3260 } 3261 bcopy(db->db.db_data, (*datap)->b_data, psize); 3262 } 3263 db->db_data_pending = dr; 3264 3265 mutex_exit(&db->db_mtx); 3266 3267 dbuf_write(dr, *datap, tx); 3268 3269 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3270 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3271 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3272 DB_DNODE_EXIT(db); 3273 } else { 3274 /* 3275 * Although zio_nowait() does not "wait for an IO", it does 3276 * initiate the IO. If this is an empty write it seems plausible 3277 * that the IO could actually be completed before the nowait 3278 * returns. We need to DB_DNODE_EXIT() first in case 3279 * zio_nowait() invalidates the dbuf. 
3280 */ 3281 DB_DNODE_EXIT(db); 3282 zio_nowait(dr->dr_zio); 3283 } 3284 } 3285 3286 void 3287 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3288 { 3289 dbuf_dirty_record_t *dr; 3290 3291 while (dr = list_head(list)) { 3292 if (dr->dr_zio != NULL) { 3293 /* 3294 * If we find an already initialized zio then we 3295 * are processing the meta-dnode, and we have finished. 3296 * The dbufs for all dnodes are put back on the list 3297 * during processing, so that we can zio_wait() 3298 * these IOs after initiating all child IOs. 3299 */ 3300 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3301 DMU_META_DNODE_OBJECT); 3302 break; 3303 } 3304 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 3305 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 3306 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 3307 } 3308 list_remove(list, dr); 3309 if (dr->dr_dbuf->db_level > 0) 3310 dbuf_sync_indirect(dr, tx); 3311 else 3312 dbuf_sync_leaf(dr, tx); 3313 } 3314 } 3315 3316 /* ARGSUSED */ 3317 static void 3318 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3319 { 3320 dmu_buf_impl_t *db = vdb; 3321 dnode_t *dn; 3322 blkptr_t *bp = zio->io_bp; 3323 blkptr_t *bp_orig = &zio->io_bp_orig; 3324 spa_t *spa = zio->io_spa; 3325 int64_t delta; 3326 uint64_t fill = 0; 3327 int i; 3328 3329 ASSERT3P(db->db_blkptr, !=, NULL); 3330 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3331 3332 DB_DNODE_ENTER(db); 3333 dn = DB_DNODE(db); 3334 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3335 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3336 zio->io_prev_space_delta = delta; 3337 3338 if (bp->blk_birth != 0) { 3339 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 3340 BP_GET_TYPE(bp) == dn->dn_type) || 3341 (db->db_blkid == DMU_SPILL_BLKID && 3342 BP_GET_TYPE(bp) == dn->dn_bonustype) || 3343 BP_IS_EMBEDDED(bp)); 3344 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3345 } 3346 3347 mutex_enter(&db->db_mtx); 3348 3349 #ifdef ZFS_DEBUG 3350 if (db->db_blkid == DMU_SPILL_BLKID) { 3351 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3352 ASSERT(!(BP_IS_HOLE(bp)) && 3353 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 3354 } 3355 #endif 3356 3357 if (db->db_level == 0) { 3358 mutex_enter(&dn->dn_mtx); 3359 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 3360 db->db_blkid != DMU_SPILL_BLKID) 3361 dn->dn_phys->dn_maxblkid = db->db_blkid; 3362 mutex_exit(&dn->dn_mtx); 3363 3364 if (dn->dn_type == DMU_OT_DNODE) { 3365 i = 0; 3366 while (i < db->db.db_size) { 3367 dnode_phys_t *dnp = 3368 (void *)(((char *)db->db.db_data) + i); 3369 3370 i += DNODE_MIN_SIZE; 3371 if (dnp->dn_type != DMU_OT_NONE) { 3372 fill++; 3373 i += dnp->dn_extra_slots * 3374 DNODE_MIN_SIZE; 3375 } 3376 } 3377 } else { 3378 if (BP_IS_HOLE(bp)) { 3379 fill = 0; 3380 } else { 3381 fill = 1; 3382 } 3383 } 3384 } else { 3385 blkptr_t *ibp = db->db.db_data; 3386 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3387 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3388 if (BP_IS_HOLE(ibp)) 3389 continue; 3390 fill += BP_GET_FILL(ibp); 3391 } 3392 } 3393 DB_DNODE_EXIT(db); 3394 3395 if (!BP_IS_EMBEDDED(bp)) 3396 bp->blk_fill = fill; 3397 3398 mutex_exit(&db->db_mtx); 3399 3400 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3401 *db->db_blkptr = *bp; 3402 rw_exit(&dn->dn_struct_rwlock); 3403 } 3404 3405 /* ARGSUSED */ 3406 /* 3407 * This function gets called just prior to running through the compression 3408 * stage of the zio pipeline. 
If we're an indirect block comprised of only
3409 * holes, then we want this indirect to be compressed away to a hole. In
3410 * order to do that we must zero out any information about the holes that
3411 * this indirect points to before we try to compress it.
3412 */
3413 static void
3414 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
3415 {
3416 	dmu_buf_impl_t *db = vdb;
3417 	dnode_t *dn;
3418 	blkptr_t *bp;
3419 	unsigned int epbs, i;
3420 
3421 	ASSERT3U(db->db_level, >, 0);
3422 	DB_DNODE_ENTER(db);
3423 	dn = DB_DNODE(db);
3424 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3425 	ASSERT3U(epbs, <, 31);
3426 
3427 	/* Determine if all our children are holes */
3428 	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
3429 		if (!BP_IS_HOLE(bp))
3430 			break;
3431 	}
3432 
3433 	/*
3434 	 * If all the children are holes, then zero them all out so that
3435 	 * we may get compressed away.
3436 	 */
3437 	if (i == 1 << epbs) {
3438 		/*
3439 		 * We only found holes. Grab the rwlock to prevent
3440 		 * anybody from reading the blocks we're about to
3441 		 * zero out.
3442 		 */
3443 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
3444 		bzero(db->db.db_data, db->db.db_size);
3445 		rw_exit(&dn->dn_struct_rwlock);
3446 	}
3447 	DB_DNODE_EXIT(db);
3448 }
3449 
3450 /*
3451  * The SPA will call this callback several times for each zio - once
3452  * for every physical child i/o (zio->io_phys_children times). This
3453  * allows the DMU to monitor the progress of each logical i/o. For example,
3454  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
3455  * block. There may be a long delay before all copies/fragments are completed,
3456  * so this callback allows us to retire dirty space gradually, as the physical
3457  * i/os complete.
3458  */
3459 /* ARGSUSED */
3460 static void
3461 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
3462 {
3463 	dmu_buf_impl_t *db = arg;
3464 	objset_t *os = db->db_objset;
3465 	dsl_pool_t *dp = dmu_objset_pool(os);
3466 	dbuf_dirty_record_t *dr;
3467 	int delta = 0;
3468 
3469 	dr = db->db_data_pending;
3470 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
3471 
3472 	/*
3473 	 * The callback will be called io_phys_children times. Retire one
3474 	 * portion of our dirty space each time we are called. Any rounding
3475 	 * error will be cleaned up by dsl_pool_sync()'s call to
3476 	 * dsl_pool_undirty_space().
3477 	 */
3478 	delta = dr->dr_accounted / zio->io_phys_children;
3479 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
3480 }
3481 
3482 /* ARGSUSED */
3483 static void
3484 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
3485 {
3486 	dmu_buf_impl_t *db = vdb;
3487 	blkptr_t *bp_orig = &zio->io_bp_orig;
3488 	blkptr_t *bp = db->db_blkptr;
3489 	objset_t *os = db->db_objset;
3490 	dmu_tx_t *tx = os->os_synctx;
3491 	dbuf_dirty_record_t **drp, *dr;
3492 
3493 	ASSERT0(zio->io_error);
3494 	ASSERT(db->db_blkptr == bp);
3495 
3496 	/*
3497 	 * For nopwrites and rewrites we ensure that the bp matches our
3498 	 * original and bypass all the accounting.
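	 * Otherwise the previous block is killed and the new block's birth
	 * is recorded against the dataset.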
3499 */ 3500 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3501 ASSERT(BP_EQUAL(bp, bp_orig)); 3502 } else { 3503 dsl_dataset_t *ds = os->os_dsl_dataset; 3504 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3505 dsl_dataset_block_born(ds, bp, tx); 3506 } 3507 3508 mutex_enter(&db->db_mtx); 3509 3510 DBUF_VERIFY(db); 3511 3512 drp = &db->db_last_dirty; 3513 while ((dr = *drp) != db->db_data_pending) 3514 drp = &dr->dr_next; 3515 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3516 ASSERT(dr->dr_dbuf == db); 3517 ASSERT(dr->dr_next == NULL); 3518 *drp = dr->dr_next; 3519 3520 #ifdef ZFS_DEBUG 3521 if (db->db_blkid == DMU_SPILL_BLKID) { 3522 dnode_t *dn; 3523 3524 DB_DNODE_ENTER(db); 3525 dn = DB_DNODE(db); 3526 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3527 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 3528 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 3529 DB_DNODE_EXIT(db); 3530 } 3531 #endif 3532 3533 if (db->db_level == 0) { 3534 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3535 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 3536 if (db->db_state != DB_NOFILL) { 3537 if (dr->dt.dl.dr_data != db->db_buf) 3538 arc_buf_destroy(dr->dt.dl.dr_data, db); 3539 } 3540 } else { 3541 dnode_t *dn; 3542 3543 DB_DNODE_ENTER(db); 3544 dn = DB_DNODE(db); 3545 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3546 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3547 if (!BP_IS_HOLE(db->db_blkptr)) { 3548 int epbs = 3549 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3550 ASSERT3U(db->db_blkid, <=, 3551 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3552 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3553 db->db.db_size); 3554 } 3555 DB_DNODE_EXIT(db); 3556 mutex_destroy(&dr->dt.di.dr_mtx); 3557 list_destroy(&dr->dt.di.dr_children); 3558 } 3559 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3560 3561 cv_broadcast(&db->db_changed); 3562 ASSERT(db->db_dirtycnt > 0); 3563 db->db_dirtycnt -= 1; 3564 db->db_data_pending = NULL; 3565 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 3566 } 3567 3568 static void 3569 dbuf_write_nofill_ready(zio_t *zio) 3570 { 3571 dbuf_write_ready(zio, NULL, zio->io_private); 3572 } 3573 3574 static void 3575 dbuf_write_nofill_done(zio_t *zio) 3576 { 3577 dbuf_write_done(zio, NULL, zio->io_private); 3578 } 3579 3580 static void 3581 dbuf_write_override_ready(zio_t *zio) 3582 { 3583 dbuf_dirty_record_t *dr = zio->io_private; 3584 dmu_buf_impl_t *db = dr->dr_dbuf; 3585 3586 dbuf_write_ready(zio, NULL, db); 3587 } 3588 3589 static void 3590 dbuf_write_override_done(zio_t *zio) 3591 { 3592 dbuf_dirty_record_t *dr = zio->io_private; 3593 dmu_buf_impl_t *db = dr->dr_dbuf; 3594 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3595 3596 mutex_enter(&db->db_mtx); 3597 if (!BP_EQUAL(zio->io_bp, obp)) { 3598 if (!BP_IS_HOLE(obp)) 3599 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3600 arc_release(dr->dt.dl.dr_data, db); 3601 } 3602 mutex_exit(&db->db_mtx); 3603 dbuf_write_done(zio, NULL, db); 3604 3605 if (zio->io_abd != NULL) 3606 abd_put(zio->io_abd); 3607 } 3608 3609 typedef struct dbuf_remap_impl_callback_arg { 3610 objset_t *drica_os; 3611 uint64_t drica_blk_birth; 3612 dmu_tx_t *drica_tx; 3613 } dbuf_remap_impl_callback_arg_t; 3614 3615 static void 3616 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 3617 void *arg) 3618 { 3619 dbuf_remap_impl_callback_arg_t *drica = arg; 3620 objset_t *os = drica->drica_os; 3621 spa_t *spa = dmu_objset_spa(os); 3622 dmu_tx_t *tx = drica->drica_tx; 
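	/*
	 * Blocks in the MOS are marked obsolete directly on the indirect
	 * vdev; blocks in any other objset are recorded against their
	 * dataset along with the original birth txg.
	 */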

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (os == spa_meta_objset(spa)) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
		    size, drica->drica_blk_birth, tx);
	}
}

static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t bp_copy = *bp;
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	dbuf_remap_impl_callback_arg_t drica;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	drica.drica_os = dn->dn_objset;
	drica.drica_blk_birth = bp->blk_birth;
	drica.drica_tx = tx;
	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
	    &drica)) {
		/*
		 * The struct_rwlock prevents dbuf_read_impl() from
		 * dereferencing the BP while we are changing it. To
		 * avoid lock contention, only grab it when we are actually
		 * changing the BP.
		 */
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		*bp = bp_copy;
		rw_exit(&dn->dn_struct_rwlock);
	}
}

/*
 * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
 * to remap a copy of every bp in the dbuf.
 */
boolean_t
dbuf_can_remap(const dmu_buf_impl_t *db)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	blkptr_t *bp = db->db.db_data;
	boolean_t ret = B_FALSE;

	ASSERT3U(db->db_level, >, 0);
	ASSERT3S(db->db_state, ==, DB_CACHED);

	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
		blkptr_t bp_copy = bp[i];
		if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
			ret = B_TRUE;
			break;
		}
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (ret);
}

boolean_t
dnode_needs_remap(const dnode_t *dn)
{
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	boolean_t ret = B_FALSE;

	if (dn->dn_phys->dn_nlevels == 0) {
		return (B_FALSE);
	}

	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
		blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
		if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
			ret = B_TRUE;
			break;
		}
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (ret);
}

/*
 * Remap any existing BPs to concrete vdevs, if possible.
 */
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return;

	if (db->db_level > 0) {
		blkptr_t *bp = db->db.db_data;
		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
			dbuf_remap_impl(dn, &bp[i], tx);
		}
	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		dnode_phys_t *dnp = db->db.db_data;
		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
		    DMU_OT_DNODE);
		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) {
			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx);
			}
		}
	}
}


/* Issue I/O to commit a dirty buffer to disk.
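 * Called from dbuf_sync_leaf() and dbuf_sync_indirect() in syncing context.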
 */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
			dbuf_remap(dn, db, tx);
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context. We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
		abd_t *contents = (data != NULL) ?
		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;

		dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy,
		    contents, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to set up the children
		 * ready callback so that we can properly handle an indirect
		 * block that only contains holes.
		 */
		arc_write_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
		    &zp, dbuf_write_ready, children_ready_cb,
		    dbuf_write_physdone, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}
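
/*
 * Illustrative sketch only (not compiled): a worked example, with
 * hypothetical numbers, of how the dbuf_write_physdone() callback above
 * retires dirty space a little at a time as the physical child i/os of a
 * write complete.
 */
#if 0
static void
dbuf_physdone_example(void)
{
	uint64_t dr_accounted = 131072;		/* hypothetical dirty bytes */
	uint64_t io_phys_children = 3;		/* e.g. three copies written */
	uint64_t delta = dr_accounted / io_phys_children;	/* 43690 */

	/*
	 * Each of the three callbacks retires 43690 bytes (131070 in all);
	 * the 2 leftover bytes are the rounding error that dsl_pool_sync()
	 * later cleans up via dsl_pool_undirty_space().
	 */
}
#endif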