/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>

uint_t zfs_dbuf_evict_key;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs are added to the dbuf cache once the last hold is released. If a
 * dbuf is later accessed and still exists in the dbuf cache, then it will
 * be removed from the cache and later re-added to the head of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 */
static multilist_t *dbuf_cache;
static refcount_t dbuf_cache_size;
uint64_t dbuf_cache_max_bytes = 100 * 1024 * 1024;

/* Cap the size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_max_shift = 5;
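
/*
 * Worked example (illustrative values): dbuf_init() caps
 * dbuf_cache_max_bytes at arc_max_bytes() >> dbuf_cache_max_shift,
 * i.e. 1/32nd of the ARC by default. With a 1GB ARC the cap is 32MB,
 * overriding the 100MB default above; with an 8GB ARC the cap is
 * 256MB and the 100MB default stands.
 */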

/*
 * The dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 *              The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}
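
/*
 * Note on the hash above: the crc64 steps only mix the low-order byte
 * or two of each input, so the final xor folds in the higher-order
 * bits of osv, obj and blkid to keep large object numbers and block
 * ids spread across hash buckets.
 */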

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}
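
/*
 * Worked example (using the defaults above): with dbuf_cache_max_bytes
 * at 100MB and both percentages at 10, dbuf_cache_above_lowater()
 * returns true above 100MB - 10MB = 90MB and dbuf_cache_above_hiwater()
 * above 100MB + 10MB = 110MB, giving the eviction thread the
 * [90MB, 110MB] operating band sketched in the diagram near the top of
 * this file.
 */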

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_cache);
	multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_cache_size,
		    db->db.db_size, db);
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static void
dbuf_evict_thread(void)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size. The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
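	/*
	 * Sizing example (illustrative): with 16GB of physical memory the
	 * loop below stops at hsize = 2^22 (4M buckets), since
	 * 2^22 * 4096 = 16GB; at 8 bytes per bucket pointer the table
	 * then occupies 32MB, matching the 2MB/GB estimate above.
	 */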
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Setup the parameters for the dbuf cache. We cap the size of the
	 * dbuf cache to 1/32nd (default) of the size of the ARC.
	 */
	dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes,
	    arc_max_bytes() >> dbuf_cache_max_shift);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);

	dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_cache_link),
	    dbuf_cache_multilist_index_func);
	refcount_create(&dbuf_cache_size);

	tsd_create(&zfs_dbuf_evict_key, NULL);
	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);
	tsd_destroy(&zfs_dbuf_evict_key);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	refcount_destroy(&dbuf_cache_size);
	multilist_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
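
/*
 * Worked example for dbuf_whichblock() (hypothetical shift values):
 * with 128K data blocks (datablkshift = 17) and 16K indirect blocks
 * (indblkshift = 14), each indirect block holds 2^(14 - 7) = 128 block
 * pointers, so the level-1 block covering byte offset 2^30 (1GB) is
 * 2^30 >> (17 + 1 * (14 - 7)) = 2^30 >> 24 = blkid 64.
 */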

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		arc_buf_destroy(buf, db);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (compress_type == ZIO_COMPRESS_OFF) {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		} else {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		/*
		 * If the arc buf is compressed, we need to decompress it to
		 * read the data. This could happen during the "zfs receive" of
		 * a stream which is compressed and deduplicated.
		 */
		if (db->db_buf != NULL &&
		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) {
			dbuf_fix_old_data(db,
			    spa_syncing_txg(dmu_objset_spa(db->db_objset)));
			err = arc_decompress(db->db_buf);
			dbuf_set_data(db, db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;
		boolean_t need_wait = B_FALSE;

		if (zio == NULL &&
		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
			need_wait = B_TRUE;
		}
		dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (need_wait)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	/*
	 * This assert is valid because dmu_sync() expects to be called by
	 * a zilog's get_data while holding a range lock. This call only
	 * comes from dbuf_dirty() callers who must also hold a range lock.
	 */
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t db_search;
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid &&
	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;
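
	/*
	 * Descriptive note (an assumption about the avl comparator, not
	 * stated in the original comments): DB_SEARCH marks the on-stack
	 * db_search as a pure search key, so the comparator never matches
	 * it exactly; avl_find() is therefore expected to return NULL and
	 * only the insertion point recorded in 'where' is used below.
	 */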

	mutex_enter(&dn->dn_dbufs_mtx);
	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);

	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_destroy(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	arc_buf_destroy(obuf, db);
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

/*
 * We already have a dirty record for this TXG, and we are being
 * dirtied again.
 */
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this buffer has already been written out,
		 * we now need to reset its state.
		 */
		dbuf_unoverride(dr);
		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
		    db->db_state != DB_NOFILL) {
			/* Already released on initial dirty, so just thaw. */
			ASSERT(arc_released(db->db_buf));
			arc_buf_thaw(db->db_buf);
		}
	}
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context. Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
#ifdef DEBUG
	if (dn->dn_objset->os_dsl_dataset != NULL) {
		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
		    RW_READER, FTAG);
	}
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty. They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too? The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED) {
		if (dn->dn_objset->os_dsl_dataset != NULL) {
			rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
			    RW_READER, FTAG);
		}
		if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
			dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
			    DN_DIRTY_SYNC : DN_DIRTY_OPEN);
			ASSERT(dn->dn_dirtyctx_firstset == NULL);
			dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
		}
		if (dn->dn_objset->os_dsl_dataset != NULL) {
			rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
			    FTAG);
		}
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		dbuf_redirty(dr);
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context. Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef DEBUG
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	if (dn->dn_objset->os_dsl_dataset != NULL)
		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		dmu_objset_willuse_space(os, db->db.db_size, tx);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block. Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty. We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	}

	/*
	 * The dn_struct_rwlock prevents db_blkptr from changing
	 * due to a write from syncing context completing
	 * while we are running, so we want to acquire it before
	 * looking at db_blkptr.
	 */
	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	/*
	 * We need to hold the dn_struct_rwlock to make this assertion,
	 * because it protects dn_phys / dn_next_nlevels from changing.
	 */
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * If we are overwriting a dedup BP, then unless it is snapshotted,
	 * when we get to syncing context we will need to decrement its
	 * refcount in the DDT. Prefetch the relevant DDT block so that
	 * syncing context won't have to wait for the i/o.
	 */
	ddt_prefetch(os->os_spa, db->db_blkptr);

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

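	/*
	 * Dirtying a block also dirties the chain of indirect blocks that
	 * points to it: below, if this dbuf sits under the top level we
	 * recurse into the parent (holding it first when it is not already
	 * held) and attach this dirty record to the parent's list of dirty
	 * children; otherwise the record goes on the dnode's dirty list.
	 */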
	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction. Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);

	/*
	 * Due to our use of dn_nlevels below, this can only be called
	 * in open context, unless we are operating on the MOS.
	 * From syncing context, dn_nlevels may be different from the
	 * dn_nlevels used when dbuf was dirtied.
	 */
	ASSERT(db->db_objset ==
	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
	    dr->dr_accounted, txg);

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
1788 * Make sure to do a list_remove corresponding to 1789 * every one of those list_insert calls. 1790 */ 1791 if (dr->dr_parent) { 1792 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1793 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1794 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1795 } else if (db->db_blkid == DMU_SPILL_BLKID || 1796 db->db_level + 1 == dn->dn_nlevels) { 1797 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1798 mutex_enter(&dn->dn_mtx); 1799 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1800 mutex_exit(&dn->dn_mtx); 1801 } 1802 DB_DNODE_EXIT(db); 1803 1804 if (db->db_state != DB_NOFILL) { 1805 dbuf_unoverride(dr); 1806 1807 ASSERT(db->db_buf != NULL); 1808 ASSERT(dr->dt.dl.dr_data != NULL); 1809 if (dr->dt.dl.dr_data != db->db_buf) 1810 arc_buf_destroy(dr->dt.dl.dr_data, db); 1811 } 1812 1813 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1814 1815 ASSERT(db->db_dirtycnt > 0); 1816 db->db_dirtycnt -= 1; 1817 1818 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1819 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1820 dbuf_destroy(db); 1821 return (B_TRUE); 1822 } 1823 1824 return (B_FALSE); 1825 } 1826 1827 void 1828 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1829 { 1830 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1831 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1832 1833 ASSERT(tx->tx_txg != 0); 1834 ASSERT(!refcount_is_zero(&db->db_holds)); 1835 1836 /* 1837 * Quick check for dirtiness. For already dirty blocks, this 1838 * reduces the runtime of this function by >90%, and improves overall 1839 * performance by 50% for some workloads (e.g. file deletion with 1840 * indirect blocks cached). 1841 */ 1842 mutex_enter(&db->db_mtx); 1843 dbuf_dirty_record_t *dr; 1844 for (dr = db->db_last_dirty; 1845 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 1846 /* 1847 * It's possible that it is already dirty but not cached, 1848 * because there are some calls to dbuf_dirty() that don't 1849 * go through dmu_buf_will_dirty(). 1850 */ 1851 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 1852 /* This dbuf is already dirty and cached.
*/ 1853 dbuf_redirty(dr); 1854 mutex_exit(&db->db_mtx); 1855 return; 1856 } 1857 } 1858 mutex_exit(&db->db_mtx); 1859 1860 DB_DNODE_ENTER(db); 1861 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1862 rf |= DB_RF_HAVESTRUCT; 1863 DB_DNODE_EXIT(db); 1864 (void) dbuf_read(db, NULL, rf); 1865 (void) dbuf_dirty(db, tx); 1866 } 1867 1868 void 1869 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1870 { 1871 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1872 1873 db->db_state = DB_NOFILL; 1874 1875 dmu_buf_will_fill(db_fake, tx); 1876 } 1877 1878 void 1879 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1880 { 1881 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1882 1883 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1884 ASSERT(tx->tx_txg != 0); 1885 ASSERT(db->db_level == 0); 1886 ASSERT(!refcount_is_zero(&db->db_holds)); 1887 1888 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1889 dmu_tx_private_ok(tx)); 1890 1891 dbuf_noread(db); 1892 (void) dbuf_dirty(db, tx); 1893 } 1894 1895 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1896 /* ARGSUSED */ 1897 void 1898 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1899 { 1900 mutex_enter(&db->db_mtx); 1901 DBUF_VERIFY(db); 1902 1903 if (db->db_state == DB_FILL) { 1904 if (db->db_level == 0 && db->db_freed_in_flight) { 1905 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1906 /* we were freed while filling */ 1907 /* XXX dbuf_undirty? */ 1908 bzero(db->db.db_data, db->db.db_size); 1909 db->db_freed_in_flight = FALSE; 1910 } 1911 db->db_state = DB_CACHED; 1912 cv_broadcast(&db->db_changed); 1913 } 1914 mutex_exit(&db->db_mtx); 1915 } 1916 1917 void 1918 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 1919 bp_embedded_type_t etype, enum zio_compress comp, 1920 int uncompressed_size, int compressed_size, int byteorder, 1921 dmu_tx_t *tx) 1922 { 1923 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 1924 struct dirty_leaf *dl; 1925 dmu_object_type_t type; 1926 1927 if (etype == BP_EMBEDDED_TYPE_DATA) { 1928 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1929 SPA_FEATURE_EMBEDDED_DATA)); 1930 } 1931 1932 DB_DNODE_ENTER(db); 1933 type = DB_DNODE(db)->dn_type; 1934 DB_DNODE_EXIT(db); 1935 1936 ASSERT0(db->db_level); 1937 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1938 1939 dmu_buf_will_not_fill(dbuf, tx); 1940 1941 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1942 dl = &db->db_last_dirty->dt.dl; 1943 encode_embedded_bp_compressed(&dl->dr_overridden_by, 1944 data, comp, uncompressed_size, compressed_size); 1945 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 1946 BP_SET_TYPE(&dl->dr_overridden_by, type); 1947 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 1948 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 1949 1950 dl->dr_override_state = DR_OVERRIDDEN; 1951 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 1952 } 1953 1954 /* 1955 * Directly assign a provided arc buf to a given dbuf if it's not referenced 1956 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
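 *
 * A typical zero-copy write reaches this function roughly as follows
 * (a sketch; "handle", "offset" and "blksz" are illustrative, the
 * public entry points are dmu_request_arcbuf() and dmu_assign_arcbuf()
 * in dmu.c):
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(handle, blksz);
 *	... fill abuf->b_data with blksz bytes ...
 *	dmu_assign_arcbuf(handle, offset, abuf, tx);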
1957 */ 1958 void 1959 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 1960 { 1961 ASSERT(!refcount_is_zero(&db->db_holds)); 1962 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1963 ASSERT(db->db_level == 0); 1964 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 1965 ASSERT(buf != NULL); 1966 ASSERT(arc_buf_lsize(buf) == db->db.db_size); 1967 ASSERT(tx->tx_txg != 0); 1968 1969 arc_return_buf(buf, db); 1970 ASSERT(arc_released(buf)); 1971 1972 mutex_enter(&db->db_mtx); 1973 1974 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1975 cv_wait(&db->db_changed, &db->db_mtx); 1976 1977 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 1978 1979 if (db->db_state == DB_CACHED && 1980 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 1981 mutex_exit(&db->db_mtx); 1982 (void) dbuf_dirty(db, tx); 1983 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1984 arc_buf_destroy(buf, db); 1985 xuio_stat_wbuf_copied(); 1986 return; 1987 } 1988 1989 xuio_stat_wbuf_nocopy(); 1990 if (db->db_state == DB_CACHED) { 1991 dbuf_dirty_record_t *dr = db->db_last_dirty; 1992 1993 ASSERT(db->db_buf != NULL); 1994 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 1995 ASSERT(dr->dt.dl.dr_data == db->db_buf); 1996 if (!arc_released(db->db_buf)) { 1997 ASSERT(dr->dt.dl.dr_override_state == 1998 DR_OVERRIDDEN); 1999 arc_release(db->db_buf, db); 2000 } 2001 dr->dt.dl.dr_data = buf; 2002 arc_buf_destroy(db->db_buf, db); 2003 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2004 arc_release(db->db_buf, db); 2005 arc_buf_destroy(db->db_buf, db); 2006 } 2007 db->db_buf = NULL; 2008 } 2009 ASSERT(db->db_buf == NULL); 2010 dbuf_set_data(db, buf); 2011 db->db_state = DB_FILL; 2012 mutex_exit(&db->db_mtx); 2013 (void) dbuf_dirty(db, tx); 2014 dmu_buf_fill_done(&db->db, tx); 2015 } 2016 2017 void 2018 dbuf_destroy(dmu_buf_impl_t *db) 2019 { 2020 dnode_t *dn; 2021 dmu_buf_impl_t *parent = db->db_parent; 2022 dmu_buf_impl_t *dndb; 2023 2024 ASSERT(MUTEX_HELD(&db->db_mtx)); 2025 ASSERT(refcount_is_zero(&db->db_holds)); 2026 2027 if (db->db_buf != NULL) { 2028 arc_buf_destroy(db->db_buf, db); 2029 db->db_buf = NULL; 2030 } 2031 2032 if (db->db_blkid == DMU_BONUS_BLKID) { 2033 ASSERT(db->db.db_data != NULL); 2034 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 2035 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2036 db->db_state = DB_UNCACHED; 2037 } 2038 2039 dbuf_clear_data(db); 2040 2041 if (multilist_link_active(&db->db_cache_link)) { 2042 multilist_remove(dbuf_cache, db); 2043 (void) refcount_remove_many(&dbuf_cache_size, 2044 db->db.db_size, db); 2045 } 2046 2047 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2048 ASSERT(db->db_data_pending == NULL); 2049 2050 db->db_state = DB_EVICTING; 2051 db->db_blkptr = NULL; 2052 2053 /* 2054 * Now that db_state is DB_EVICTING, nobody else can find this via 2055 * the hash table. We can now drop db_mtx, which allows us to 2056 * acquire the dn_dbufs_mtx. 
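 * (Lock ordering: dn_dbufs_mtx is taken before db_mtx elsewhere,
 * e.g. when evicting all of a dnode's dbufs, so taking it here while
 * still holding db_mtx could deadlock.)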
2057 */ 2058 mutex_exit(&db->db_mtx); 2059 2060 DB_DNODE_ENTER(db); 2061 dn = DB_DNODE(db); 2062 dndb = dn->dn_dbuf; 2063 if (db->db_blkid != DMU_BONUS_BLKID) { 2064 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2065 if (needlock) 2066 mutex_enter(&dn->dn_dbufs_mtx); 2067 avl_remove(&dn->dn_dbufs, db); 2068 atomic_dec_32(&dn->dn_dbufs_count); 2069 membar_producer(); 2070 DB_DNODE_EXIT(db); 2071 if (needlock) 2072 mutex_exit(&dn->dn_dbufs_mtx); 2073 /* 2074 * Decrementing the dbuf count means that the hold corresponding 2075 * to the removed dbuf is no longer discounted in dnode_move(), 2076 * so the dnode cannot be moved until after we release the hold. 2077 * The membar_producer() ensures visibility of the decremented 2078 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2079 * release any lock. 2080 */ 2081 dnode_rele(dn, db); 2082 db->db_dnode_handle = NULL; 2083 2084 dbuf_hash_remove(db); 2085 } else { 2086 DB_DNODE_EXIT(db); 2087 } 2088 2089 ASSERT(refcount_is_zero(&db->db_holds)); 2090 2091 db->db_parent = NULL; 2092 2093 ASSERT(db->db_buf == NULL); 2094 ASSERT(db->db.db_data == NULL); 2095 ASSERT(db->db_hash_next == NULL); 2096 ASSERT(db->db_blkptr == NULL); 2097 ASSERT(db->db_data_pending == NULL); 2098 ASSERT(!multilist_link_active(&db->db_cache_link)); 2099 2100 kmem_cache_free(dbuf_kmem_cache, db); 2101 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2102 2103 /* 2104 * If this dbuf is referenced from an indirect dbuf, 2105 * decrement the ref count on the indirect dbuf. 2106 */ 2107 if (parent && parent != dndb) 2108 dbuf_rele(parent, db); 2109 } 2110 2111 /* 2112 * Note: While bpp will always be updated if the function returns success, 2113 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2114 * this happens when the dnode is the meta-dnode, or a userused or groupused 2115 * object. 2116 */ 2117 static int 2118 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2119 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2120 { 2121 *parentp = NULL; 2122 *bpp = NULL; 2123 2124 ASSERT(blkid != DMU_BONUS_BLKID); 2125 2126 if (blkid == DMU_SPILL_BLKID) { 2127 mutex_enter(&dn->dn_mtx); 2128 if (dn->dn_have_spill && 2129 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2130 *bpp = &dn->dn_phys->dn_spill; 2131 else 2132 *bpp = NULL; 2133 dbuf_add_ref(dn->dn_dbuf, NULL); 2134 *parentp = dn->dn_dbuf; 2135 mutex_exit(&dn->dn_mtx); 2136 return (0); 2137 } 2138 2139 int nlevels = 2140 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 2141 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2142 2143 ASSERT3U(level * epbs, <, 64); 2144 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2145 /* 2146 * This assertion shouldn't trip as long as the max indirect block size 2147 * is less than 1M. The reason for this is that up to that point, 2148 * the number of levels required to address an entire object with blocks 2149 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 2150 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 2151 * (i.e. we can address the entire object), objects will all use at most 2152 * N-1 levels and the assertion won't overflow. However, once epbs is 2153 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 2154 * enough to address an entire object, so objects will have 5 levels, 2155 * but then this assertion will overflow. 
2156 * 2157 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 2158 * need to redo this logic to handle overflows. 2159 */ 2160 ASSERT(level >= nlevels || 2161 ((nlevels - level - 1) * epbs) + 2162 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2163 if (level >= nlevels || 2164 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 2165 ((nlevels - level - 1) * epbs)) || 2166 (fail_sparse && 2167 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2168 /* the buffer has no parent yet */ 2169 return (SET_ERROR(ENOENT)); 2170 } else if (level < nlevels-1) { 2171 /* this block is referenced from an indirect block */ 2172 int err = dbuf_hold_impl(dn, level+1, 2173 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2174 if (err) 2175 return (err); 2176 err = dbuf_read(*parentp, NULL, 2177 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2178 if (err) { 2179 dbuf_rele(*parentp, NULL); 2180 *parentp = NULL; 2181 return (err); 2182 } 2183 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2184 (blkid & ((1ULL << epbs) - 1)); 2185 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 2186 ASSERT(BP_IS_HOLE(*bpp)); 2187 return (0); 2188 } else { 2189 /* the block is referenced from the dnode */ 2190 ASSERT3U(level, ==, nlevels-1); 2191 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2192 blkid < dn->dn_phys->dn_nblkptr); 2193 if (dn->dn_dbuf) { 2194 dbuf_add_ref(dn->dn_dbuf, NULL); 2195 *parentp = dn->dn_dbuf; 2196 } 2197 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2198 return (0); 2199 } 2200 } 2201 2202 static dmu_buf_impl_t * 2203 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2204 dmu_buf_impl_t *parent, blkptr_t *blkptr) 2205 { 2206 objset_t *os = dn->dn_objset; 2207 dmu_buf_impl_t *db, *odb; 2208 2209 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2210 ASSERT(dn->dn_type != DMU_OT_NONE); 2211 2212 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2213 2214 db->db_objset = os; 2215 db->db.db_object = dn->dn_object; 2216 db->db_level = level; 2217 db->db_blkid = blkid; 2218 db->db_last_dirty = NULL; 2219 db->db_dirtycnt = 0; 2220 db->db_dnode_handle = dn->dn_handle; 2221 db->db_parent = parent; 2222 db->db_blkptr = blkptr; 2223 2224 db->db_user = NULL; 2225 db->db_user_immediate_evict = FALSE; 2226 db->db_freed_in_flight = FALSE; 2227 db->db_pending_evict = FALSE; 2228 2229 if (blkid == DMU_BONUS_BLKID) { 2230 ASSERT3P(parent, ==, dn->dn_dbuf); 2231 db->db.db_size = DN_MAX_BONUSLEN - 2232 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 2233 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 2234 db->db.db_offset = DMU_BONUS_BLKID; 2235 db->db_state = DB_UNCACHED; 2236 /* the bonus dbuf is not placed in the hash table */ 2237 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2238 return (db); 2239 } else if (blkid == DMU_SPILL_BLKID) { 2240 db->db.db_size = (blkptr != NULL) ? 2241 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 2242 db->db.db_offset = 0; 2243 } else { 2244 int blocksize = 2245 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2246 db->db.db_size = blocksize; 2247 db->db.db_offset = db->db_blkid * blocksize; 2248 } 2249 2250 /* 2251 * Hold the dn_dbufs_mtx while we get the new dbuf 2252 * in the hash table *and* added to the dbufs list. 2253 * This prevents a possible deadlock with someone 2254 * trying to look up this dbuf before it's added to the 2255 * dn_dbufs list.
2256 */ 2257 mutex_enter(&dn->dn_dbufs_mtx); 2258 db->db_state = DB_EVICTING; 2259 if ((odb = dbuf_hash_insert(db)) != NULL) { 2260 /* someone else inserted it first */ 2261 kmem_cache_free(dbuf_kmem_cache, db); 2262 mutex_exit(&dn->dn_dbufs_mtx); 2263 return (odb); 2264 } 2265 avl_add(&dn->dn_dbufs, db); 2266 2267 db->db_state = DB_UNCACHED; 2268 mutex_exit(&dn->dn_dbufs_mtx); 2269 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2270 2271 if (parent && parent != dn->dn_dbuf) 2272 dbuf_add_ref(parent, db); 2273 2274 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2275 refcount_count(&dn->dn_holds) > 0); 2276 (void) refcount_add(&dn->dn_holds, db); 2277 atomic_inc_32(&dn->dn_dbufs_count); 2278 2279 dprintf_dbuf(db, "db=%p\n", db); 2280 2281 return (db); 2282 } 2283 2284 typedef struct dbuf_prefetch_arg { 2285 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2286 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2287 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2288 int dpa_curlevel; /* The current level that we're reading */ 2289 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2290 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2291 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2292 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2293 } dbuf_prefetch_arg_t; 2294 2295 /* 2296 * Actually issue the prefetch read for the block given. 2297 */ 2298 static void 2299 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2300 { 2301 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2302 return; 2303 2304 arc_flags_t aflags = 2305 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2306 2307 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2308 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2309 ASSERT(dpa->dpa_zio != NULL); 2310 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2311 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2312 &aflags, &dpa->dpa_zb); 2313 } 2314 2315 /* 2316 * Called when an indirect block above our prefetch target is read in. This 2317 * will either read in the next indirect block down the tree or issue the actual 2318 * prefetch if the next block down is our target. 2319 */ 2320 static void 2321 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2322 { 2323 dbuf_prefetch_arg_t *dpa = private; 2324 2325 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2326 ASSERT3S(dpa->dpa_curlevel, >, 0); 2327 2328 /* 2329 * The dpa_dnode is only valid if we are called with a NULL 2330 * zio. This indicates that the arc_read() returned without 2331 * first calling zio_read() to issue a physical read. Once 2332 * a physical read is made the dpa_dnode must be invalidated 2333 * as the locks guarding it may have been dropped. If the 2334 * dpa_dnode is still valid, then we want to add it to the dbuf 2335 * cache. To do so, we must hold the dbuf associated with the block 2336 * we just prefetched, read its contents so that we associate it 2337 * with an arc_buf_t, and then release it. 
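 * (The dbuf_hold_level()/dbuf_read()/dbuf_rele() sequence in the
 * else-if branch below does exactly that.)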
2338 */ 2339 if (zio != NULL) { 2340 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2341 if (zio->io_flags & ZIO_FLAG_RAW) { 2342 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2343 } else { 2344 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2345 } 2346 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2347 2348 dpa->dpa_dnode = NULL; 2349 } else if (dpa->dpa_dnode != NULL) { 2350 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2351 (dpa->dpa_epbs * (dpa->dpa_curlevel - 2352 dpa->dpa_zb.zb_level)); 2353 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2354 dpa->dpa_curlevel, curblkid, FTAG); 2355 (void) dbuf_read(db, NULL, 2356 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2357 dbuf_rele(db, FTAG); 2358 } 2359 2360 dpa->dpa_curlevel--; 2361 2362 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2363 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2364 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2365 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2366 if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { 2367 kmem_free(dpa, sizeof (*dpa)); 2368 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2369 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2370 dbuf_issue_final_prefetch(dpa, bp); 2371 kmem_free(dpa, sizeof (*dpa)); 2372 } else { 2373 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2374 zbookmark_phys_t zb; 2375 2376 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2377 2378 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2379 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2380 2381 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2382 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2383 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2384 &iter_aflags, &zb); 2385 } 2386 2387 arc_buf_destroy(abuf, private); 2388 } 2389 2390 /* 2391 * Issue prefetch reads for the given block on the given level. If the indirect 2392 * blocks above that block are not in memory, we will read them in 2393 * asynchronously. As a result, this call never blocks waiting for a read to 2394 * complete. 2395 */ 2396 void 2397 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2398 arc_flags_t aflags) 2399 { 2400 blkptr_t bp; 2401 int epbs, nlevels, curlevel; 2402 uint64_t curblkid; 2403 2404 ASSERT(blkid != DMU_BONUS_BLKID); 2405 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2406 2407 if (blkid > dn->dn_maxblkid) 2408 return; 2409 2410 if (dnode_block_freed(dn, blkid)) 2411 return; 2412 2413 /* 2414 * This dnode hasn't been written to disk yet, so there's nothing to 2415 * prefetch. 2416 */ 2417 nlevels = dn->dn_phys->dn_nlevels; 2418 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2419 return; 2420 2421 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2422 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2423 return; 2424 2425 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2426 level, blkid); 2427 if (db != NULL) { 2428 mutex_exit(&db->db_mtx); 2429 /* 2430 * This dbuf already exists. It is either CACHED, or 2431 * (we assume) about to be read or filled. 2432 */ 2433 return; 2434 } 2435 2436 /* 2437 * Find the closest ancestor (indirect block) of the target block 2438 * that is present in the cache. In this indirect block, we will 2439 * find the bp that is at curlevel, curblkid. 
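 * For example, with epbs = 10, a prefetch of level-0 blkid 12345 first
 * looks for the level-1 indirect at blkid 12 (12345 >> 10), then the
 * level-2 indirect at blkid 0, walking up until a cached dbuf is found
 * or the block pointers embedded in the dnode are reached.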
2440 */ 2441 curlevel = level; 2442 curblkid = blkid; 2443 while (curlevel < nlevels - 1) { 2444 int parent_level = curlevel + 1; 2445 uint64_t parent_blkid = curblkid >> epbs; 2446 dmu_buf_impl_t *db; 2447 2448 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2449 FALSE, TRUE, FTAG, &db) == 0) { 2450 blkptr_t *bpp = db->db_buf->b_data; 2451 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2452 dbuf_rele(db, FTAG); 2453 break; 2454 } 2455 2456 curlevel = parent_level; 2457 curblkid = parent_blkid; 2458 } 2459 2460 if (curlevel == nlevels - 1) { 2461 /* No cached indirect blocks found. */ 2462 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2463 bp = dn->dn_phys->dn_blkptr[curblkid]; 2464 } 2465 if (BP_IS_HOLE(&bp)) 2466 return; 2467 2468 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2469 2470 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2471 ZIO_FLAG_CANFAIL); 2472 2473 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2474 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2475 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2476 dn->dn_object, level, blkid); 2477 dpa->dpa_curlevel = curlevel; 2478 dpa->dpa_prio = prio; 2479 dpa->dpa_aflags = aflags; 2480 dpa->dpa_spa = dn->dn_objset->os_spa; 2481 dpa->dpa_dnode = dn; 2482 dpa->dpa_epbs = epbs; 2483 dpa->dpa_zio = pio; 2484 2485 /* 2486 * If we have the indirect just above us, no need to do the asynchronous 2487 * prefetch chain; we'll just run the last step ourselves. If we're at 2488 * a higher level, though, we want to issue the prefetches for all the 2489 * indirect blocks asynchronously, so we can go on with whatever we were 2490 * doing. 2491 */ 2492 if (curlevel == level) { 2493 ASSERT3U(curblkid, ==, blkid); 2494 dbuf_issue_final_prefetch(dpa, &bp); 2495 kmem_free(dpa, sizeof (*dpa)); 2496 } else { 2497 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2498 zbookmark_phys_t zb; 2499 2500 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2501 dn->dn_object, curlevel, curblkid); 2502 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2503 &bp, dbuf_prefetch_indirect_done, dpa, prio, 2504 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2505 &iter_aflags, &zb); 2506 } 2507 /* 2508 * We use pio here instead of dpa_zio since it's possible that 2509 * dpa may have already been freed. 2510 */ 2511 zio_nowait(pio); 2512 } 2513 2514 /* 2515 * Returns with db_holds incremented, and db_mtx not held. 2516 * Note: dn_struct_rwlock must be held. 
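 *
 * A minimal caller sketch (error handling elided):
 *
 *	dmu_buf_impl_t *db;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	if (dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db) == 0) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 *	rw_exit(&dn->dn_struct_rwlock);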
2517 */ 2518 int 2519 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2520 boolean_t fail_sparse, boolean_t fail_uncached, 2521 void *tag, dmu_buf_impl_t **dbp) 2522 { 2523 dmu_buf_impl_t *db, *parent = NULL; 2524 2525 ASSERT(blkid != DMU_BONUS_BLKID); 2526 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2527 ASSERT3U(dn->dn_nlevels, >, level); 2528 2529 *dbp = NULL; 2530 top: 2531 /* dbuf_find() returns with db_mtx held */ 2532 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2533 2534 if (db == NULL) { 2535 blkptr_t *bp = NULL; 2536 int err; 2537 2538 if (fail_uncached) 2539 return (SET_ERROR(ENOENT)); 2540 2541 ASSERT3P(parent, ==, NULL); 2542 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2543 if (fail_sparse) { 2544 if (err == 0 && bp && BP_IS_HOLE(bp)) 2545 err = SET_ERROR(ENOENT); 2546 if (err) { 2547 if (parent) 2548 dbuf_rele(parent, NULL); 2549 return (err); 2550 } 2551 } 2552 if (err && err != ENOENT) 2553 return (err); 2554 db = dbuf_create(dn, level, blkid, parent, bp); 2555 } 2556 2557 if (fail_uncached && db->db_state != DB_CACHED) { 2558 mutex_exit(&db->db_mtx); 2559 return (SET_ERROR(ENOENT)); 2560 } 2561 2562 if (db->db_buf != NULL) 2563 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2564 2565 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2566 2567 /* 2568 * If this buffer is currently syncing out, and we are 2569 * still referencing it from db_data, we need to make a copy 2570 * of it in case we decide we want to dirty it again in this txg. 2571 */ 2572 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2573 dn->dn_object != DMU_META_DNODE_OBJECT && 2574 db->db_state == DB_CACHED && db->db_data_pending) { 2575 dbuf_dirty_record_t *dr = db->db_data_pending; 2576 2577 if (dr->dt.dl.dr_data == db->db_buf) { 2578 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2579 2580 dbuf_set_data(db, 2581 arc_alloc_buf(dn->dn_objset->os_spa, db, type, 2582 db->db.db_size)); 2583 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2584 db->db.db_size); 2585 } 2586 } 2587 2588 if (multilist_link_active(&db->db_cache_link)) { 2589 ASSERT(refcount_is_zero(&db->db_holds)); 2590 multilist_remove(dbuf_cache, db); 2591 (void) refcount_remove_many(&dbuf_cache_size, 2592 db->db.db_size, db); 2593 } 2594 (void) refcount_add(&db->db_holds, tag); 2595 DBUF_VERIFY(db); 2596 mutex_exit(&db->db_mtx); 2597 2598 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2599 if (parent) 2600 dbuf_rele(parent, NULL); 2601 2602 ASSERT3P(DB_DNODE(db), ==, dn); 2603 ASSERT3U(db->db_blkid, ==, blkid); 2604 ASSERT3U(db->db_level, ==, level); 2605 *dbp = db; 2606 2607 return (0); 2608 } 2609 2610 dmu_buf_impl_t * 2611 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2612 { 2613 return (dbuf_hold_level(dn, 0, blkid, tag)); 2614 } 2615 2616 dmu_buf_impl_t * 2617 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2618 { 2619 dmu_buf_impl_t *db; 2620 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2621 return (err ? 
NULL : db); 2622 } 2623 2624 void 2625 dbuf_create_bonus(dnode_t *dn) 2626 { 2627 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2628 2629 ASSERT(dn->dn_bonus == NULL); 2630 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 2631 } 2632 2633 int 2634 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 2635 { 2636 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2637 dnode_t *dn; 2638 2639 if (db->db_blkid != DMU_SPILL_BLKID) 2640 return (SET_ERROR(ENOTSUP)); 2641 if (blksz == 0) 2642 blksz = SPA_MINBLOCKSIZE; 2643 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2644 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 2645 2646 DB_DNODE_ENTER(db); 2647 dn = DB_DNODE(db); 2648 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2649 dbuf_new_size(db, blksz, tx); 2650 rw_exit(&dn->dn_struct_rwlock); 2651 DB_DNODE_EXIT(db); 2652 2653 return (0); 2654 } 2655 2656 void 2657 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 2658 { 2659 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2660 } 2661 2662 #pragma weak dmu_buf_add_ref = dbuf_add_ref 2663 void 2664 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2665 { 2666 int64_t holds = refcount_add(&db->db_holds, tag); 2667 ASSERT3S(holds, >, 1); 2668 } 2669 2670 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2671 boolean_t 2672 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2673 void *tag) 2674 { 2675 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2676 dmu_buf_impl_t *found_db; 2677 boolean_t result = B_FALSE; 2678 2679 if (db->db_blkid == DMU_BONUS_BLKID) 2680 found_db = dbuf_find_bonus(os, obj); 2681 else 2682 found_db = dbuf_find(os, obj, 0, blkid); 2683 2684 if (found_db != NULL) { 2685 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2686 (void) refcount_add(&db->db_holds, tag); 2687 result = B_TRUE; 2688 } 2689 mutex_exit(&db->db_mtx); 2690 } 2691 return (result); 2692 } 2693 2694 /* 2695 * If you call dbuf_rele() you had better not be referencing the dnode handle 2696 * unless you have some other direct or indirect hold on the dnode. (An indirect 2697 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2698 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2699 * dnode's parent dbuf evicting its dnode handles. 2700 */ 2701 void 2702 dbuf_rele(dmu_buf_impl_t *db, void *tag) 2703 { 2704 mutex_enter(&db->db_mtx); 2705 dbuf_rele_and_unlock(db, tag); 2706 } 2707 2708 void 2709 dmu_buf_rele(dmu_buf_t *db, void *tag) 2710 { 2711 dbuf_rele((dmu_buf_impl_t *)db, tag); 2712 } 2713 2714 /* 2715 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2716 * db_dirtycnt and db_holds to be updated atomically. 2717 */ 2718 void 2719 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2720 { 2721 int64_t holds; 2722 2723 ASSERT(MUTEX_HELD(&db->db_mtx)); 2724 DBUF_VERIFY(db); 2725 2726 /* 2727 * Remove the reference to the dbuf before removing its hold on the 2728 * dnode so we can guarantee in dnode_move() that a referenced bonus 2729 * buffer has a corresponding dnode hold. 2730 */ 2731 holds = refcount_remove(&db->db_holds, tag); 2732 ASSERT(holds >= 0); 2733 2734 /* 2735 * We can't freeze indirects if there is a possibility that they 2736 * may be modified in the current syncing context. 2737 */ 2738 if (db->db_buf != NULL && 2739 holds == (db->db_level == 0 ? 
db->db_dirtycnt : 0)) { 2740 arc_buf_freeze(db->db_buf); 2741 } 2742 2743 if (holds == db->db_dirtycnt && 2744 db->db_level == 0 && db->db_user_immediate_evict) 2745 dbuf_evict_user(db); 2746 2747 if (holds == 0) { 2748 if (db->db_blkid == DMU_BONUS_BLKID) { 2749 dnode_t *dn; 2750 boolean_t evict_dbuf = db->db_pending_evict; 2751 2752 /* 2753 * If the dnode moves here, we cannot cross this 2754 * barrier until the move completes. 2755 */ 2756 DB_DNODE_ENTER(db); 2757 2758 dn = DB_DNODE(db); 2759 atomic_dec_32(&dn->dn_dbufs_count); 2760 2761 /* 2762 * Decrementing the dbuf count means that the bonus 2763 * buffer's dnode hold is no longer discounted in 2764 * dnode_move(). The dnode cannot move until after 2765 * the dnode_rele() below. 2766 */ 2767 DB_DNODE_EXIT(db); 2768 2769 /* 2770 * Do not reference db after its lock is dropped. 2771 * Another thread may evict it. 2772 */ 2773 mutex_exit(&db->db_mtx); 2774 2775 if (evict_dbuf) 2776 dnode_evict_bonus(dn); 2777 2778 dnode_rele(dn, db); 2779 } else if (db->db_buf == NULL) { 2780 /* 2781 * This is a special case: we never associated this 2782 * dbuf with any data allocated from the ARC. 2783 */ 2784 ASSERT(db->db_state == DB_UNCACHED || 2785 db->db_state == DB_NOFILL); 2786 dbuf_destroy(db); 2787 } else if (arc_released(db->db_buf)) { 2788 /* 2789 * This dbuf has anonymous data associated with it. 2790 */ 2791 dbuf_destroy(db); 2792 } else { 2793 boolean_t do_arc_evict = B_FALSE; 2794 blkptr_t bp; 2795 spa_t *spa = dmu_objset_spa(db->db_objset); 2796 2797 if (!DBUF_IS_CACHEABLE(db) && 2798 db->db_blkptr != NULL && 2799 !BP_IS_HOLE(db->db_blkptr) && 2800 !BP_IS_EMBEDDED(db->db_blkptr)) { 2801 do_arc_evict = B_TRUE; 2802 bp = *db->db_blkptr; 2803 } 2804 2805 if (!DBUF_IS_CACHEABLE(db) || 2806 db->db_pending_evict) { 2807 dbuf_destroy(db); 2808 } else if (!multilist_link_active(&db->db_cache_link)) { 2809 multilist_insert(dbuf_cache, db); 2810 (void) refcount_add_many(&dbuf_cache_size, 2811 db->db.db_size, db); 2812 mutex_exit(&db->db_mtx); 2813 2814 dbuf_evict_notify(); 2815 } 2816 2817 if (do_arc_evict) 2818 arc_freed(spa, &bp); 2819 } 2820 } else { 2821 mutex_exit(&db->db_mtx); 2822 } 2823 2824 } 2825 2826 #pragma weak dmu_buf_refcount = dbuf_refcount 2827 uint64_t 2828 dbuf_refcount(dmu_buf_impl_t *db) 2829 { 2830 return (refcount_count(&db->db_holds)); 2831 } 2832 2833 void * 2834 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2835 dmu_buf_user_t *new_user) 2836 { 2837 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2838 2839 mutex_enter(&db->db_mtx); 2840 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2841 if (db->db_user == old_user) 2842 db->db_user = new_user; 2843 else 2844 old_user = db->db_user; 2845 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2846 mutex_exit(&db->db_mtx); 2847 2848 return (old_user); 2849 } 2850 2851 void * 2852 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2853 { 2854 return (dmu_buf_replace_user(db_fake, NULL, user)); 2855 } 2856 2857 void * 2858 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2859 { 2860 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2861 2862 db->db_user_immediate_evict = TRUE; 2863 return (dmu_buf_set_user(db_fake, user)); 2864 } 2865 2866 void * 2867 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2868 { 2869 return (dmu_buf_replace_user(db_fake, user, NULL)); 2870 } 2871 2872 void * 2873 dmu_buf_get_user(dmu_buf_t *db_fake) 2874 { 2875 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2876 2877 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2878 
return (db->db_user); 2879 } 2880 2881 void 2882 dmu_buf_user_evict_wait() 2883 { 2884 taskq_wait(dbu_evict_taskq); 2885 } 2886 2887 blkptr_t * 2888 dmu_buf_get_blkptr(dmu_buf_t *db) 2889 { 2890 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2891 return (dbi->db_blkptr); 2892 } 2893 2894 objset_t * 2895 dmu_buf_get_objset(dmu_buf_t *db) 2896 { 2897 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2898 return (dbi->db_objset); 2899 } 2900 2901 dnode_t * 2902 dmu_buf_dnode_enter(dmu_buf_t *db) 2903 { 2904 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2905 DB_DNODE_ENTER(dbi); 2906 return (DB_DNODE(dbi)); 2907 } 2908 2909 void 2910 dmu_buf_dnode_exit(dmu_buf_t *db) 2911 { 2912 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2913 DB_DNODE_EXIT(dbi); 2914 } 2915 2916 static void 2917 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 2918 { 2919 /* ASSERT(dmu_tx_is_syncing(tx)) */ 2920 ASSERT(MUTEX_HELD(&db->db_mtx)); 2921 2922 if (db->db_blkptr != NULL) 2923 return; 2924 2925 if (db->db_blkid == DMU_SPILL_BLKID) { 2926 db->db_blkptr = &dn->dn_phys->dn_spill; 2927 BP_ZERO(db->db_blkptr); 2928 return; 2929 } 2930 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 2931 /* 2932 * This buffer was allocated at a time when there were 2933 * no available blkptrs from the dnode, or it was 2934 * inappropriate to hook it in (i.e., nlevels mismatch). 2935 */ 2936 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 2937 ASSERT(db->db_parent == NULL); 2938 db->db_parent = dn->dn_dbuf; 2939 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 2940 DBUF_VERIFY(db); 2941 } else { 2942 dmu_buf_impl_t *parent = db->db_parent; 2943 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2944 2945 ASSERT(dn->dn_phys->dn_nlevels > 1); 2946 if (parent == NULL) { 2947 mutex_exit(&db->db_mtx); 2948 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2949 parent = dbuf_hold_level(dn, db->db_level + 1, 2950 db->db_blkid >> epbs, db); 2951 rw_exit(&dn->dn_struct_rwlock); 2952 mutex_enter(&db->db_mtx); 2953 db->db_parent = parent; 2954 } 2955 db->db_blkptr = (blkptr_t *)parent->db.db_data + 2956 (db->db_blkid & ((1ULL << epbs) - 1)); 2957 DBUF_VERIFY(db); 2958 } 2959 } 2960 2961 static void 2962 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2963 { 2964 dmu_buf_impl_t *db = dr->dr_dbuf; 2965 dnode_t *dn; 2966 zio_t *zio; 2967 2968 ASSERT(dmu_tx_is_syncing(tx)); 2969 2970 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2971 2972 mutex_enter(&db->db_mtx); 2973 2974 ASSERT(db->db_level > 0); 2975 DBUF_VERIFY(db); 2976 2977 /* Read the block if it hasn't been read yet. */ 2978 if (db->db_buf == NULL) { 2979 mutex_exit(&db->db_mtx); 2980 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 2981 mutex_enter(&db->db_mtx); 2982 } 2983 ASSERT3U(db->db_state, ==, DB_CACHED); 2984 ASSERT(db->db_buf != NULL); 2985 2986 DB_DNODE_ENTER(db); 2987 dn = DB_DNODE(db); 2988 /* Indirect block size must match what the dnode thinks it is. 
*/ 2989 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 2990 dbuf_check_blkptr(dn, db); 2991 DB_DNODE_EXIT(db); 2992 2993 /* Provide the pending dirty record to child dbufs */ 2994 db->db_data_pending = dr; 2995 2996 mutex_exit(&db->db_mtx); 2997 dbuf_write(dr, db->db_buf, tx); 2998 2999 zio = dr->dr_zio; 3000 mutex_enter(&dr->dt.di.dr_mtx); 3001 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3002 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3003 mutex_exit(&dr->dt.di.dr_mtx); 3004 zio_nowait(zio); 3005 } 3006 3007 static void 3008 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3009 { 3010 arc_buf_t **datap = &dr->dt.dl.dr_data; 3011 dmu_buf_impl_t *db = dr->dr_dbuf; 3012 dnode_t *dn; 3013 objset_t *os; 3014 uint64_t txg = tx->tx_txg; 3015 3016 ASSERT(dmu_tx_is_syncing(tx)); 3017 3018 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3019 3020 mutex_enter(&db->db_mtx); 3021 /* 3022 * To be synced, we must be dirtied. But we 3023 * might have been freed after the dirty. 3024 */ 3025 if (db->db_state == DB_UNCACHED) { 3026 /* This buffer has been freed since it was dirtied */ 3027 ASSERT(db->db.db_data == NULL); 3028 } else if (db->db_state == DB_FILL) { 3029 /* This buffer was freed and is now being re-filled */ 3030 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3031 } else { 3032 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3033 } 3034 DBUF_VERIFY(db); 3035 3036 DB_DNODE_ENTER(db); 3037 dn = DB_DNODE(db); 3038 3039 if (db->db_blkid == DMU_SPILL_BLKID) { 3040 mutex_enter(&dn->dn_mtx); 3041 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 3042 mutex_exit(&dn->dn_mtx); 3043 } 3044 3045 /* 3046 * If this is a bonus buffer, simply copy the bonus data into the 3047 * dnode. It will be written out when the dnode is synced (and it 3048 * will be synced, since it must have been dirty for dbuf_sync to 3049 * be called). 3050 */ 3051 if (db->db_blkid == DMU_BONUS_BLKID) { 3052 dbuf_dirty_record_t **drp; 3053 3054 ASSERT(*datap != NULL); 3055 ASSERT0(db->db_level); 3056 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); 3057 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); 3058 DB_DNODE_EXIT(db); 3059 3060 if (*datap != db->db.db_data) { 3061 zio_buf_free(*datap, DN_MAX_BONUSLEN); 3062 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 3063 } 3064 db->db_data_pending = NULL; 3065 drp = &db->db_last_dirty; 3066 while (*drp != dr) 3067 drp = &(*drp)->dr_next; 3068 ASSERT(dr->dr_next == NULL); 3069 ASSERT(dr->dr_dbuf == db); 3070 *drp = dr->dr_next; 3071 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3072 ASSERT(db->db_dirtycnt > 0); 3073 db->db_dirtycnt -= 1; 3074 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); 3075 return; 3076 } 3077 3078 os = dn->dn_objset; 3079 3080 /* 3081 * This function may have dropped the db_mtx lock allowing a dmu_sync 3082 * operation to sneak in. As a result, we need to ensure that we 3083 * don't check the dr_override_state until we have returned from 3084 * dbuf_check_blkptr. 3085 */ 3086 dbuf_check_blkptr(dn, db); 3087 3088 /* 3089 * If this buffer is in the middle of an immediate write, 3090 * wait for the synchronous IO to complete. 
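 * (dmu_sync() marks the record DR_IN_DMU_SYNC while its write is in
 * flight and broadcasts db_changed once the state moves on.)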
3091 */ 3092 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3093 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3094 cv_wait(&db->db_changed, &db->db_mtx); 3095 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3096 } 3097 3098 if (db->db_state != DB_NOFILL && 3099 dn->dn_object != DMU_META_DNODE_OBJECT && 3100 refcount_count(&db->db_holds) > 1 && 3101 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3102 *datap == db->db_buf) { 3103 /* 3104 * If this buffer is currently "in use" (i.e., there 3105 * are active holds and db_data still references it), 3106 * then make a copy before we start the write so that 3107 * any modifications from the open txg will not leak 3108 * into this write. 3109 * 3110 * NOTE: this copy does not need to be made for 3111 * objects only modified in the syncing context (e.g. 3112 * DMU_OT_DNODE blocks). 3113 */ 3114 int psize = arc_buf_size(*datap); 3115 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 3116 enum zio_compress compress_type = arc_get_compression(*datap); 3117 3118 if (compress_type == ZIO_COMPRESS_OFF) { 3119 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 3120 } else { 3121 ASSERT3U(type, ==, ARC_BUFC_DATA); 3122 int lsize = arc_buf_lsize(*datap); 3123 *datap = arc_alloc_compressed_buf(os->os_spa, db, 3124 psize, lsize, compress_type); 3125 } 3126 bcopy(db->db.db_data, (*datap)->b_data, psize); 3127 } 3128 db->db_data_pending = dr; 3129 3130 mutex_exit(&db->db_mtx); 3131 3132 dbuf_write(dr, *datap, tx); 3133 3134 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3135 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3136 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3137 DB_DNODE_EXIT(db); 3138 } else { 3139 /* 3140 * Although zio_nowait() does not "wait for an IO", it does 3141 * initiate the IO. If this is an empty write it seems plausible 3142 * that the IO could actually be completed before the nowait 3143 * returns. We need to DB_DNODE_EXIT() first in case 3144 * zio_nowait() invalidates the dbuf. 3145 */ 3146 DB_DNODE_EXIT(db); 3147 zio_nowait(dr->dr_zio); 3148 } 3149 } 3150 3151 void 3152 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3153 { 3154 dbuf_dirty_record_t *dr; 3155 3156 while (dr = list_head(list)) { 3157 if (dr->dr_zio != NULL) { 3158 /* 3159 * If we find an already initialized zio then we 3160 * are processing the meta-dnode, and we have finished. 3161 * The dbufs for all dnodes are put back on the list 3162 * during processing, so that we can zio_wait() 3163 * these IOs after initiating all child IOs. 
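 * (dbuf_sync_leaf() is what re-queues them: once dbuf_write() has
 * set dr_zio, meta-dnode dirty records go back on dn_dirty_records.)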
3164 */ 3165 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3166 DMU_META_DNODE_OBJECT); 3167 break; 3168 } 3169 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 3170 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 3171 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 3172 } 3173 list_remove(list, dr); 3174 if (dr->dr_dbuf->db_level > 0) 3175 dbuf_sync_indirect(dr, tx); 3176 else 3177 dbuf_sync_leaf(dr, tx); 3178 } 3179 } 3180 3181 /* ARGSUSED */ 3182 static void 3183 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3184 { 3185 dmu_buf_impl_t *db = vdb; 3186 dnode_t *dn; 3187 blkptr_t *bp = zio->io_bp; 3188 blkptr_t *bp_orig = &zio->io_bp_orig; 3189 spa_t *spa = zio->io_spa; 3190 int64_t delta; 3191 uint64_t fill = 0; 3192 int i; 3193 3194 ASSERT3P(db->db_blkptr, !=, NULL); 3195 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3196 3197 DB_DNODE_ENTER(db); 3198 dn = DB_DNODE(db); 3199 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3200 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3201 zio->io_prev_space_delta = delta; 3202 3203 if (bp->blk_birth != 0) { 3204 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 3205 BP_GET_TYPE(bp) == dn->dn_type) || 3206 (db->db_blkid == DMU_SPILL_BLKID && 3207 BP_GET_TYPE(bp) == dn->dn_bonustype) || 3208 BP_IS_EMBEDDED(bp)); 3209 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3210 } 3211 3212 mutex_enter(&db->db_mtx); 3213 3214 #ifdef ZFS_DEBUG 3215 if (db->db_blkid == DMU_SPILL_BLKID) { 3216 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3217 ASSERT(!(BP_IS_HOLE(bp)) && 3218 db->db_blkptr == &dn->dn_phys->dn_spill); 3219 } 3220 #endif 3221 3222 if (db->db_level == 0) { 3223 mutex_enter(&dn->dn_mtx); 3224 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 3225 db->db_blkid != DMU_SPILL_BLKID) 3226 dn->dn_phys->dn_maxblkid = db->db_blkid; 3227 mutex_exit(&dn->dn_mtx); 3228 3229 if (dn->dn_type == DMU_OT_DNODE) { 3230 dnode_phys_t *dnp = db->db.db_data; 3231 for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 3232 i--, dnp++) { 3233 if (dnp->dn_type != DMU_OT_NONE) 3234 fill++; 3235 } 3236 } else { 3237 if (BP_IS_HOLE(bp)) { 3238 fill = 0; 3239 } else { 3240 fill = 1; 3241 } 3242 } 3243 } else { 3244 blkptr_t *ibp = db->db.db_data; 3245 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3246 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3247 if (BP_IS_HOLE(ibp)) 3248 continue; 3249 fill += BP_GET_FILL(ibp); 3250 } 3251 } 3252 DB_DNODE_EXIT(db); 3253 3254 if (!BP_IS_EMBEDDED(bp)) 3255 bp->blk_fill = fill; 3256 3257 mutex_exit(&db->db_mtx); 3258 3259 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3260 *db->db_blkptr = *bp; 3261 rw_exit(&dn->dn_struct_rwlock); 3262 } 3263 3264 /* ARGSUSED */ 3265 /* 3266 * This function gets called just prior to running through the compression 3267 * stage of the zio pipeline. If we're an indirect block composed of only 3268 * holes, then we want this indirect to be compressed away to a hole. In 3269 * order to do that we must zero out any information about the holes that 3270 * this indirect points to prior to the attempt to compress it. 
3271 */ 3272 static void 3273 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3274 { 3275 dmu_buf_impl_t *db = vdb; 3276 dnode_t *dn; 3277 blkptr_t *bp; 3278 unsigned int epbs, i; 3279 3280 ASSERT3U(db->db_level, >, 0); 3281 DB_DNODE_ENTER(db); 3282 dn = DB_DNODE(db); 3283 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3284 ASSERT3U(epbs, <, 31); 3285 3286 /* Determine if all our children are holes */ 3287 for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 3288 if (!BP_IS_HOLE(bp)) 3289 break; 3290 } 3291 3292 /* 3293 * If all the children are holes, then zero them all out so that 3294 * we may get compressed away. 3295 */ 3296 if (i == 1 << epbs) { 3297 /* 3298 * We only found holes. Grab the rwlock to prevent 3299 * anybody from reading the blocks we're about to 3300 * zero out. 3301 */ 3302 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3303 bzero(db->db.db_data, db->db.db_size); 3304 rw_exit(&dn->dn_struct_rwlock); 3305 } 3306 DB_DNODE_EXIT(db); 3307 } 3308 3309 /* 3310 * The SPA will call this callback several times for each zio - once 3311 * for every physical child i/o (zio->io_phys_children times). This 3312 * allows the DMU to monitor the progress of each logical i/o. For example, 3313 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 3314 * block. There may be a long delay before all copies/fragments are completed, 3315 * so this callback allows us to retire dirty space gradually, as the physical 3316 * i/os complete. 3317 */ 3318 /* ARGSUSED */ 3319 static void 3320 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 3321 { 3322 dmu_buf_impl_t *db = arg; 3323 objset_t *os = db->db_objset; 3324 dsl_pool_t *dp = dmu_objset_pool(os); 3325 dbuf_dirty_record_t *dr; 3326 int delta = 0; 3327 3328 dr = db->db_data_pending; 3329 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 3330 3331 /* 3332 * The callback will be called io_phys_children times. Retire one 3333 * portion of our dirty space each time we are called. Any rounding 3334 * error will be cleaned up by dsl_pool_sync()'s call to 3335 * dsl_pool_undirty_space(). 3336 */ 3337 delta = dr->dr_accounted / zio->io_phys_children; 3338 dsl_pool_undirty_space(dp, delta, zio->io_txg); 3339 } 3340 3341 /* ARGSUSED */ 3342 static void 3343 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3344 { 3345 dmu_buf_impl_t *db = vdb; 3346 blkptr_t *bp_orig = &zio->io_bp_orig; 3347 blkptr_t *bp = db->db_blkptr; 3348 objset_t *os = db->db_objset; 3349 dmu_tx_t *tx = os->os_synctx; 3350 dbuf_dirty_record_t **drp, *dr; 3351 3352 ASSERT0(zio->io_error); 3353 ASSERT(db->db_blkptr == bp); 3354 3355 /* 3356 * For nopwrites and rewrites we ensure that the bp matches our 3357 * original and bypass all the accounting. 
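 * (ZIO_FLAG_NOPWRITE means the write was elided because the checksum
 * matched the block already on disk; ZIO_FLAG_IO_REWRITE overwrites an
 * existing BP in place. Neither births a new block, so there is
 * nothing to kill or account for.)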
3358 */ 3359 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3360 ASSERT(BP_EQUAL(bp, bp_orig)); 3361 } else { 3362 dsl_dataset_t *ds = os->os_dsl_dataset; 3363 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3364 dsl_dataset_block_born(ds, bp, tx); 3365 } 3366 3367 mutex_enter(&db->db_mtx); 3368 3369 DBUF_VERIFY(db); 3370 3371 drp = &db->db_last_dirty; 3372 while ((dr = *drp) != db->db_data_pending) 3373 drp = &dr->dr_next; 3374 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3375 ASSERT(dr->dr_dbuf == db); 3376 ASSERT(dr->dr_next == NULL); 3377 *drp = dr->dr_next; 3378 3379 #ifdef ZFS_DEBUG 3380 if (db->db_blkid == DMU_SPILL_BLKID) { 3381 dnode_t *dn; 3382 3383 DB_DNODE_ENTER(db); 3384 dn = DB_DNODE(db); 3385 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3386 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 3387 db->db_blkptr == &dn->dn_phys->dn_spill); 3388 DB_DNODE_EXIT(db); 3389 } 3390 #endif 3391 3392 if (db->db_level == 0) { 3393 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3394 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 3395 if (db->db_state != DB_NOFILL) { 3396 if (dr->dt.dl.dr_data != db->db_buf) 3397 arc_buf_destroy(dr->dt.dl.dr_data, db); 3398 } 3399 } else { 3400 dnode_t *dn; 3401 3402 DB_DNODE_ENTER(db); 3403 dn = DB_DNODE(db); 3404 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3405 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3406 if (!BP_IS_HOLE(db->db_blkptr)) { 3407 int epbs = 3408 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3409 ASSERT3U(db->db_blkid, <=, 3410 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3411 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3412 db->db.db_size); 3413 } 3414 DB_DNODE_EXIT(db); 3415 mutex_destroy(&dr->dt.di.dr_mtx); 3416 list_destroy(&dr->dt.di.dr_children); 3417 } 3418 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3419 3420 cv_broadcast(&db->db_changed); 3421 ASSERT(db->db_dirtycnt > 0); 3422 db->db_dirtycnt -= 1; 3423 db->db_data_pending = NULL; 3424 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); 3425 } 3426 3427 static void 3428 dbuf_write_nofill_ready(zio_t *zio) 3429 { 3430 dbuf_write_ready(zio, NULL, zio->io_private); 3431 } 3432 3433 static void 3434 dbuf_write_nofill_done(zio_t *zio) 3435 { 3436 dbuf_write_done(zio, NULL, zio->io_private); 3437 } 3438 3439 static void 3440 dbuf_write_override_ready(zio_t *zio) 3441 { 3442 dbuf_dirty_record_t *dr = zio->io_private; 3443 dmu_buf_impl_t *db = dr->dr_dbuf; 3444 3445 dbuf_write_ready(zio, NULL, db); 3446 } 3447 3448 static void 3449 dbuf_write_override_done(zio_t *zio) 3450 { 3451 dbuf_dirty_record_t *dr = zio->io_private; 3452 dmu_buf_impl_t *db = dr->dr_dbuf; 3453 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3454 3455 mutex_enter(&db->db_mtx); 3456 if (!BP_EQUAL(zio->io_bp, obp)) { 3457 if (!BP_IS_HOLE(obp)) 3458 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3459 arc_release(dr->dt.dl.dr_data, db); 3460 } 3461 mutex_exit(&db->db_mtx); 3462 dbuf_write_done(zio, NULL, db); 3463 3464 if (zio->io_abd != NULL) 3465 abd_put(zio->io_abd); 3466 } 3467 3468 /* Issue I/O to commit a dirty buffer to disk. 
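 *
 * Depending on the dirty record this takes one of three paths below: a
 * zio_write() completed by zio_write_override() when open context has
 * already provided the BP (via dmu_sync() or dmu_buf_write_embedded()),
 * a data-less zio_write() for DB_NOFILL buffers, or a normal
 * arc_write() of the buffer's contents.
 */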
*/ 3469 static void 3470 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 3471 { 3472 dmu_buf_impl_t *db = dr->dr_dbuf; 3473 dnode_t *dn; 3474 objset_t *os; 3475 dmu_buf_impl_t *parent = db->db_parent; 3476 uint64_t txg = tx->tx_txg; 3477 zbookmark_phys_t zb; 3478 zio_prop_t zp; 3479 zio_t *zio; 3480 int wp_flag = 0; 3481 3482 ASSERT(dmu_tx_is_syncing(tx)); 3483 3484 DB_DNODE_ENTER(db); 3485 dn = DB_DNODE(db); 3486 os = dn->dn_objset; 3487 3488 if (db->db_state != DB_NOFILL) { 3489 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 3490 /* 3491 * Private object buffers are released here rather 3492 * than in dbuf_dirty() since they are only modified 3493 * in the syncing context and we don't want the 3494 * overhead of making multiple copies of the data. 3495 */ 3496 if (BP_IS_HOLE(db->db_blkptr)) { 3497 arc_buf_thaw(data); 3498 } else { 3499 dbuf_release_bp(db); 3500 } 3501 } 3502 } 3503 3504 if (parent != dn->dn_dbuf) { 3505 /* Our parent is an indirect block. */ 3506 /* We have a dirty parent that has been scheduled for write. */ 3507 ASSERT(parent && parent->db_data_pending); 3508 /* Our parent's buffer is one level closer to the dnode. */ 3509 ASSERT(db->db_level == parent->db_level-1); 3510 /* 3511 * We're about to modify our parent's db_data by modifying 3512 * our block pointer, so the parent must be released. 3513 */ 3514 ASSERT(arc_released(parent->db_buf)); 3515 zio = parent->db_data_pending->dr_zio; 3516 } else { 3517 /* Our parent is the dnode itself. */ 3518 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 3519 db->db_blkid != DMU_SPILL_BLKID) || 3520 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 3521 if (db->db_blkid != DMU_SPILL_BLKID) 3522 ASSERT3P(db->db_blkptr, ==, 3523 &dn->dn_phys->dn_blkptr[db->db_blkid]); 3524 zio = dn->dn_zio; 3525 } 3526 3527 ASSERT(db->db_level == 0 || data == db->db_buf); 3528 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 3529 ASSERT(zio); 3530 3531 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 3532 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 3533 db->db.db_object, db->db_level, db->db_blkid); 3534 3535 if (db->db_blkid == DMU_SPILL_BLKID) 3536 wp_flag = WP_SPILL; 3537 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 3538 3539 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 3540 DB_DNODE_EXIT(db); 3541 3542 /* 3543 * We copy the blkptr now (rather than when we instantiate the dirty 3544 * record), because its value can change between open context and 3545 * syncing context. We do not need to hold dn_struct_rwlock to read 3546 * db_blkptr because we are in syncing context. 3547 */ 3548 dr->dr_bp_copy = *db->db_blkptr; 3549 3550 if (db->db_level == 0 && 3551 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 3552 /* 3553 * The BP for this block has been provided by open context 3554 * (by dmu_sync() or dmu_buf_write_embedded()). 3555 */ 3556 abd_t *contents = (data != NULL) ? 
3557 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 3558 3559 dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 3560 contents, db->db.db_size, db->db.db_size, &zp, 3561 dbuf_write_override_ready, NULL, NULL, 3562 dbuf_write_override_done, 3563 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3564 mutex_enter(&db->db_mtx); 3565 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 3566 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 3567 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 3568 mutex_exit(&db->db_mtx); 3569 } else if (db->db_state == DB_NOFILL) { 3570 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 3571 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 3572 dr->dr_zio = zio_write(zio, os->os_spa, txg, 3573 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 3574 dbuf_write_nofill_ready, NULL, NULL, 3575 dbuf_write_nofill_done, db, 3576 ZIO_PRIORITY_ASYNC_WRITE, 3577 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 3578 } else { 3579 ASSERT(arc_released(data)); 3580 3581 /* 3582 * For indirect blocks, we want to setup the children 3583 * ready callback so that we can properly handle an indirect 3584 * block that only contains holes. 3585 */ 3586 arc_done_func_t *children_ready_cb = NULL; 3587 if (db->db_level != 0) 3588 children_ready_cb = dbuf_write_children_ready; 3589 3590 dr->dr_zio = arc_write(zio, os->os_spa, txg, 3591 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 3592 &zp, dbuf_write_ready, children_ready_cb, 3593 dbuf_write_physdone, dbuf_write_done, db, 3594 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3595 } 3596 } 3597