1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2012, 2017 by Delphix. All rights reserved. 25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 28 * Copyright (c) 2014 Integros [integros.com] 29 */ 30 31 #include <sys/zfs_context.h> 32 #include <sys/dmu.h> 33 #include <sys/dmu_send.h> 34 #include <sys/dmu_impl.h> 35 #include <sys/dbuf.h> 36 #include <sys/dmu_objset.h> 37 #include <sys/dsl_dataset.h> 38 #include <sys/dsl_dir.h> 39 #include <sys/dmu_tx.h> 40 #include <sys/spa.h> 41 #include <sys/zio.h> 42 #include <sys/dmu_zfetch.h> 43 #include <sys/sa.h> 44 #include <sys/sa_impl.h> 45 #include <sys/zfeature.h> 46 #include <sys/blkptr.h> 47 #include <sys/range_tree.h> 48 #include <sys/callb.h> 49 #include <sys/abd.h> 50 #include <sys/vdev.h> 51 52 uint_t zfs_dbuf_evict_key; 53 54 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 55 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 56 57 #ifndef __lint 58 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu, 59 dmu_buf_evict_func_t *evict_func_sync, 60 dmu_buf_evict_func_t *evict_func_async, 61 dmu_buf_t **clear_on_evict_dbufp); 62 #endif /* ! __lint */ 63 64 /* 65 * Global data structures and functions for the dbuf cache. 66 */ 67 static kmem_cache_t *dbuf_kmem_cache; 68 static taskq_t *dbu_evict_taskq; 69 70 static kthread_t *dbuf_cache_evict_thread; 71 static kmutex_t dbuf_evict_lock; 72 static kcondvar_t dbuf_evict_cv; 73 static boolean_t dbuf_evict_thread_exit; 74 75 /* 76 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that 77 * are not currently held but have been recently released. These dbufs 78 * are not eligible for arc eviction until they are aged out of the cache. 79 * Dbufs are added to the dbuf cache once the last hold is released. If a 80 * dbuf is later accessed and still exists in the dbuf cache, then it will 81 * be removed from the cache and later re-added to the head of the cache. 82 * Dbufs that are aged out of the cache will be immediately destroyed and 83 * become eligible for arc eviction. 84 */ 85 static multilist_t *dbuf_cache; 86 static refcount_t dbuf_cache_size; 87 uint64_t dbuf_cache_max_bytes = 100 * 1024 * 1024; 88 89 /* Cap the size of the dbuf cache to log2 fraction of arc size. 
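 * For example, with the default shift of 5 and (hypothetically) a 4 GB ARC,
 * dbuf_init() caps the cache at min(dbuf_cache_max_bytes, 4 GB >> 5) =
 * min(100 MB, 128 MB) = 100 MB.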
*/ 90 int dbuf_cache_max_shift = 5; 91 92 /* 93 * The dbuf cache uses a three-stage eviction policy: 94 * - A low water marker designates when the dbuf eviction thread 95 * should stop evicting from the dbuf cache. 96 * - When we reach the maximum size (aka mid water mark), we 97 * signal the eviction thread to run. 98 * - The high water mark indicates when the eviction thread 99 * is unable to keep up with the incoming load and eviction must 100 * happen in the context of the calling thread. 101 * 102 * The dbuf cache: 103 * (max size) 104 * low water mid water hi water 105 * +----------------------------------------+----------+----------+ 106 * | | | | 107 * | | | | 108 * | | | | 109 * | | | | 110 * +----------------------------------------+----------+----------+ 111 * stop signal evict 112 * evicting eviction directly 113 * thread 114 * 115 * The high and low water marks indicate the operating range for the eviction 116 * thread. The low water mark is, by default, 90% of the total size of the 117 * cache and the high water mark is at 110% (both of these percentages can be 118 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct, 119 * respectively). The eviction thread will try to ensure that the cache remains 120 * within this range by waking up every second and checking if the cache is 121 * above the low water mark. The thread can also be woken up by callers adding 122 * elements into the cache if the cache is larger than the mid water (i.e. max 123 * cache size). Once the eviction thread is woken up and eviction is required, 124 * it will continue evicting buffers until it's able to reduce the cache size 125 * to the low water mark. If the cache size continues to grow and hits the high 126 * water mark, then callers adding elements to the cache will begin to evict 127 * directly from the cache until the cache is no longer above the high water 128 * mark. 129 */ 130 131 /* 132 * The percentage above and below the maximum cache size.
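 * With the defaults (a 100 MB maximum and both percentages at 10), the
 * eviction thread therefore operates between roughly 90 MB (low water) and
 * 110 MB (high water); see dbuf_cache_above_hiwater() and
 * dbuf_cache_above_lowater() below.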
133 */ 134 uint_t dbuf_cache_hiwater_pct = 10; 135 uint_t dbuf_cache_lowater_pct = 10; 136 137 /* ARGSUSED */ 138 static int 139 dbuf_cons(void *vdb, void *unused, int kmflag) 140 { 141 dmu_buf_impl_t *db = vdb; 142 bzero(db, sizeof (dmu_buf_impl_t)); 143 144 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); 145 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); 146 multilist_link_init(&db->db_cache_link); 147 refcount_create(&db->db_holds); 148 149 return (0); 150 } 151 152 /* ARGSUSED */ 153 static void 154 dbuf_dest(void *vdb, void *unused) 155 { 156 dmu_buf_impl_t *db = vdb; 157 mutex_destroy(&db->db_mtx); 158 cv_destroy(&db->db_changed); 159 ASSERT(!multilist_link_active(&db->db_cache_link)); 160 refcount_destroy(&db->db_holds); 161 } 162 163 /* 164 * dbuf hash table routines 165 */ 166 static dbuf_hash_table_t dbuf_hash_table; 167 168 static uint64_t dbuf_hash_count; 169 170 static uint64_t 171 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) 172 { 173 uintptr_t osv = (uintptr_t)os; 174 uint64_t crc = -1ULL; 175 176 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 177 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF]; 178 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; 179 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; 180 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; 181 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF]; 182 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF]; 183 184 crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16); 185 186 return (crc); 187 } 188 189 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 190 ((dbuf)->db.db_object == (obj) && \ 191 (dbuf)->db_objset == (os) && \ 192 (dbuf)->db_level == (level) && \ 193 (dbuf)->db_blkid == (blkid)) 194 195 dmu_buf_impl_t * 196 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid) 197 { 198 dbuf_hash_table_t *h = &dbuf_hash_table; 199 uint64_t hv = dbuf_hash(os, obj, level, blkid); 200 uint64_t idx = hv & h->hash_table_mask; 201 dmu_buf_impl_t *db; 202 203 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 204 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { 205 if (DBUF_EQUAL(db, os, obj, level, blkid)) { 206 mutex_enter(&db->db_mtx); 207 if (db->db_state != DB_EVICTING) { 208 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 209 return (db); 210 } 211 mutex_exit(&db->db_mtx); 212 } 213 } 214 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 215 return (NULL); 216 } 217 218 static dmu_buf_impl_t * 219 dbuf_find_bonus(objset_t *os, uint64_t object) 220 { 221 dnode_t *dn; 222 dmu_buf_impl_t *db = NULL; 223 224 if (dnode_hold(os, object, FTAG, &dn) == 0) { 225 rw_enter(&dn->dn_struct_rwlock, RW_READER); 226 if (dn->dn_bonus != NULL) { 227 db = dn->dn_bonus; 228 mutex_enter(&db->db_mtx); 229 } 230 rw_exit(&dn->dn_struct_rwlock); 231 dnode_rele(dn, FTAG); 232 } 233 return (db); 234 } 235 236 /* 237 * Insert an entry into the hash table. If there is already an element 238 * equal to elem in the hash table, then the already existing element 239 * will be returned and the new element will not be inserted. 240 * Otherwise returns NULL. 
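 * If an existing element is returned, it is returned with its db_mtx held.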
241 */ 242 static dmu_buf_impl_t * 243 dbuf_hash_insert(dmu_buf_impl_t *db) 244 { 245 dbuf_hash_table_t *h = &dbuf_hash_table; 246 objset_t *os = db->db_objset; 247 uint64_t obj = db->db.db_object; 248 int level = db->db_level; 249 uint64_t blkid = db->db_blkid; 250 uint64_t hv = dbuf_hash(os, obj, level, blkid); 251 uint64_t idx = hv & h->hash_table_mask; 252 dmu_buf_impl_t *dbf; 253 254 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 255 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { 256 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { 257 mutex_enter(&dbf->db_mtx); 258 if (dbf->db_state != DB_EVICTING) { 259 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 260 return (dbf); 261 } 262 mutex_exit(&dbf->db_mtx); 263 } 264 } 265 266 mutex_enter(&db->db_mtx); 267 db->db_hash_next = h->hash_table[idx]; 268 h->hash_table[idx] = db; 269 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 270 atomic_inc_64(&dbuf_hash_count); 271 272 return (NULL); 273 } 274 275 /* 276 * Remove an entry from the hash table. It must be in the EVICTING state. 277 */ 278 static void 279 dbuf_hash_remove(dmu_buf_impl_t *db) 280 { 281 dbuf_hash_table_t *h = &dbuf_hash_table; 282 uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object, 283 db->db_level, db->db_blkid); 284 uint64_t idx = hv & h->hash_table_mask; 285 dmu_buf_impl_t *dbf, **dbp; 286 287 /* 288 * We mustn't hold db_mtx to maintain lock ordering: 289 * DBUF_HASH_MUTEX > db_mtx. 290 */ 291 ASSERT(refcount_is_zero(&db->db_holds)); 292 ASSERT(db->db_state == DB_EVICTING); 293 ASSERT(!MUTEX_HELD(&db->db_mtx)); 294 295 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 296 dbp = &h->hash_table[idx]; 297 while ((dbf = *dbp) != db) { 298 dbp = &dbf->db_hash_next; 299 ASSERT(dbf != NULL); 300 } 301 *dbp = db->db_hash_next; 302 db->db_hash_next = NULL; 303 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 304 atomic_dec_64(&dbuf_hash_count); 305 } 306 307 typedef enum { 308 DBVU_EVICTING, 309 DBVU_NOT_EVICTING 310 } dbvu_verify_type_t; 311 312 static void 313 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type) 314 { 315 #ifdef ZFS_DEBUG 316 int64_t holds; 317 318 if (db->db_user == NULL) 319 return; 320 321 /* Only data blocks support the attachment of user data. */ 322 ASSERT(db->db_level == 0); 323 324 /* Clients must resolve a dbuf before attaching user data. */ 325 ASSERT(db->db.db_data != NULL); 326 ASSERT3U(db->db_state, ==, DB_CACHED); 327 328 holds = refcount_count(&db->db_holds); 329 if (verify_type == DBVU_EVICTING) { 330 /* 331 * Immediate eviction occurs when holds == dirtycnt. 332 * For normal eviction buffers, holds is zero on 333 * eviction, except when dbuf_fix_old_data() calls 334 * dbuf_clear_data(). However, the hold count can grow 335 * during eviction even though db_mtx is held (see 336 * dmu_bonus_hold() for an example), so we can only 337 * test the generic invariant that holds >= dirtycnt.
338 */ 339 ASSERT3U(holds, >=, db->db_dirtycnt); 340 } else { 341 if (db->db_user_immediate_evict == TRUE) 342 ASSERT3U(holds, >=, db->db_dirtycnt); 343 else 344 ASSERT3U(holds, >, 0); 345 } 346 #endif 347 } 348 349 static void 350 dbuf_evict_user(dmu_buf_impl_t *db) 351 { 352 dmu_buf_user_t *dbu = db->db_user; 353 354 ASSERT(MUTEX_HELD(&db->db_mtx)); 355 356 if (dbu == NULL) 357 return; 358 359 dbuf_verify_user(db, DBVU_EVICTING); 360 db->db_user = NULL; 361 362 #ifdef ZFS_DEBUG 363 if (dbu->dbu_clear_on_evict_dbufp != NULL) 364 *dbu->dbu_clear_on_evict_dbufp = NULL; 365 #endif 366 367 /* 368 * There are two eviction callbacks - one that we call synchronously 369 * and one that we invoke via a taskq. The async one is useful for 370 * avoiding lock order reversals and limiting stack depth. 371 * 372 * Note that if we have a sync callback but no async callback, 373 * it's likely that the sync callback will free the structure 374 * containing the dbu. In that case we need to take care to not 375 * dereference dbu after calling the sync evict func. 376 */ 377 boolean_t has_async = (dbu->dbu_evict_func_async != NULL); 378 379 if (dbu->dbu_evict_func_sync != NULL) 380 dbu->dbu_evict_func_sync(dbu); 381 382 if (has_async) { 383 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async, 384 dbu, 0, &dbu->dbu_tqent); 385 } 386 } 387 388 boolean_t 389 dbuf_is_metadata(dmu_buf_impl_t *db) 390 { 391 if (db->db_level > 0) { 392 return (B_TRUE); 393 } else { 394 boolean_t is_metadata; 395 396 DB_DNODE_ENTER(db); 397 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type); 398 DB_DNODE_EXIT(db); 399 400 return (is_metadata); 401 } 402 } 403 404 /* 405 * This function *must* return indices evenly distributed between all 406 * sublists of the multilist. This is needed due to how the dbuf eviction 407 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly 408 * distributed between all sublists and uses this assumption when 409 * deciding which sublist to evict from and how much to evict from it. 410 */ 411 unsigned int 412 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj) 413 { 414 dmu_buf_impl_t *db = obj; 415 416 /* 417 * The assumption here is that the hash value for a given 418 * dmu_buf_impl_t will remain constant throughout its lifetime 419 * (i.e. its objset, object, level and blkid fields don't change). 420 * Thus, we don't need to store the dbuf's sublist index 421 * on insertion, as this index can be recalculated on removal. 422 * 423 * Also, the low order bits of the hash value are thought to be 424 * distributed evenly. Otherwise, in the case that the multilist 425 * has a power of two number of sublists, each sublist's usage 426 * would not be evenly distributed. 427 */ 428 return (dbuf_hash(db->db_objset, db->db.db_object, 429 db->db_level, db->db_blkid) % 430 multilist_get_num_sublists(ml)); 431 } 432 433 static inline boolean_t 434 dbuf_cache_above_hiwater(void) 435 { 436 uint64_t dbuf_cache_hiwater_bytes = 437 (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100; 438 439 return (refcount_count(&dbuf_cache_size) > 440 dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes); 441 } 442 443 static inline boolean_t 444 dbuf_cache_above_lowater(void) 445 { 446 uint64_t dbuf_cache_lowater_bytes = 447 (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100; 448 449 return (refcount_count(&dbuf_cache_size) > 450 dbuf_cache_max_bytes - dbuf_cache_lowater_bytes); 451 } 452 453 /* 454 * Evict the oldest eligible dbuf from the dbuf cache.
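 * A random sublist of the cache multilist is chosen, and we walk from its
 * tail (the least recently used end) toward the head, skipping any dbuf
 * whose mutex cannot be acquired without blocking; the first dbuf we can
 * lock is removed from the cache and destroyed.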
455 */ 456 static void 457 dbuf_evict_one(void) 458 { 459 int idx = multilist_get_random_index(dbuf_cache); 460 multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx); 461 462 ASSERT(!MUTEX_HELD(&dbuf_evict_lock)); 463 464 /* 465 * Set the thread's tsd to indicate that it's processing evictions. 466 * Once a thread stops evicting from the dbuf cache it will 467 * reset its tsd to NULL. 468 */ 469 ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL); 470 (void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE); 471 472 dmu_buf_impl_t *db = multilist_sublist_tail(mls); 473 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) { 474 db = multilist_sublist_prev(mls, db); 475 } 476 477 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db, 478 multilist_sublist_t *, mls); 479 480 if (db != NULL) { 481 multilist_sublist_remove(mls, db); 482 multilist_sublist_unlock(mls); 483 (void) refcount_remove_many(&dbuf_cache_size, 484 db->db.db_size, db); 485 dbuf_destroy(db); 486 } else { 487 multilist_sublist_unlock(mls); 488 } 489 (void) tsd_set(zfs_dbuf_evict_key, NULL); 490 } 491 492 /* 493 * The dbuf evict thread is responsible for aging out dbufs from the 494 * cache. Once the cache has reached it's maximum size, dbufs are removed 495 * and destroyed. The eviction thread will continue running until the size 496 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged 497 * out of the cache it is destroyed and becomes eligible for arc eviction. 498 */ 499 /* ARGSUSED */ 500 static void 501 dbuf_evict_thread(void *unused) 502 { 503 callb_cpr_t cpr; 504 505 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG); 506 507 mutex_enter(&dbuf_evict_lock); 508 while (!dbuf_evict_thread_exit) { 509 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 510 CALLB_CPR_SAFE_BEGIN(&cpr); 511 (void) cv_timedwait_hires(&dbuf_evict_cv, 512 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0); 513 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock); 514 } 515 mutex_exit(&dbuf_evict_lock); 516 517 /* 518 * Keep evicting as long as we're above the low water mark 519 * for the cache. We do this without holding the locks to 520 * minimize lock contention. 521 */ 522 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) { 523 dbuf_evict_one(); 524 } 525 526 mutex_enter(&dbuf_evict_lock); 527 } 528 529 dbuf_evict_thread_exit = B_FALSE; 530 cv_broadcast(&dbuf_evict_cv); 531 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */ 532 thread_exit(); 533 } 534 535 /* 536 * Wake up the dbuf eviction thread if the dbuf cache is at its max size. 537 * If the dbuf cache is at its high water mark, then evict a dbuf from the 538 * dbuf cache using the callers context. 539 */ 540 static void 541 dbuf_evict_notify(void) 542 { 543 544 /* 545 * We use thread specific data to track when a thread has 546 * started processing evictions. This allows us to avoid deeply 547 * nested stacks that would have a call flow similar to this: 548 * 549 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 550 * ^ | 551 * | | 552 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 553 * 554 * The dbuf_eviction_thread will always have its tsd set until 555 * that thread exits. All other threads will only set their tsd 556 * if they are participating in the eviction process. This only 557 * happens if the eviction thread is unable to process evictions 558 * fast enough. To keep the dbuf cache size in check, other threads 559 * can evict from the dbuf cache directly. 
Those threads will set 560 * their tsd values so that we ensure that they only evict one dbuf 561 * from the dbuf cache. 562 */ 563 if (tsd_get(zfs_dbuf_evict_key) != NULL) 564 return; 565 566 /* 567 * We check if we should evict without holding the dbuf_evict_lock, 568 * because it's OK to occasionally make the wrong decision here, 569 * and grabbing the lock results in massive lock contention. 570 */ 571 if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) { 572 if (dbuf_cache_above_hiwater()) 573 dbuf_evict_one(); 574 cv_signal(&dbuf_evict_cv); 575 } 576 } 577 578 void 579 dbuf_init(void) 580 { 581 uint64_t hsize = 1ULL << 16; 582 dbuf_hash_table_t *h = &dbuf_hash_table; 583 int i; 584 585 /* 586 * The hash table is big enough to fill all of physical memory 587 * with an average 4K block size. The table will take up 588 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 589 */ 590 while (hsize * 4096 < physmem * PAGESIZE) 591 hsize <<= 1; 592 593 retry: 594 h->hash_table_mask = hsize - 1; 595 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 596 if (h->hash_table == NULL) { 597 /* XXX - we should really return an error instead of assert */ 598 ASSERT(hsize > (1ULL << 10)); 599 hsize >>= 1; 600 goto retry; 601 } 602 603 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t", 604 sizeof (dmu_buf_impl_t), 605 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 606 607 for (i = 0; i < DBUF_MUTEXES; i++) 608 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 609 610 /* 611 * Setup the parameters for the dbuf cache. We cap the size of the 612 * dbuf cache to 1/32nd (default) of the size of the ARC. 613 */ 614 dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes, 615 arc_max_bytes() >> dbuf_cache_max_shift); 616 617 /* 618 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc 619 * configuration is not required. 620 */ 621 dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0); 622 623 dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t), 624 offsetof(dmu_buf_impl_t, db_cache_link), 625 dbuf_cache_multilist_index_func); 626 refcount_create(&dbuf_cache_size); 627 628 tsd_create(&zfs_dbuf_evict_key, NULL); 629 dbuf_evict_thread_exit = B_FALSE; 630 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL); 631 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL); 632 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread, 633 NULL, 0, &p0, TS_RUN, minclsyspri); 634 } 635 636 void 637 dbuf_fini(void) 638 { 639 dbuf_hash_table_t *h = &dbuf_hash_table; 640 int i; 641 642 for (i = 0; i < DBUF_MUTEXES; i++) 643 mutex_destroy(&h->hash_mutexes[i]); 644 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 645 kmem_cache_destroy(dbuf_kmem_cache); 646 taskq_destroy(dbu_evict_taskq); 647 648 mutex_enter(&dbuf_evict_lock); 649 dbuf_evict_thread_exit = B_TRUE; 650 while (dbuf_evict_thread_exit) { 651 cv_signal(&dbuf_evict_cv); 652 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 653 } 654 mutex_exit(&dbuf_evict_lock); 655 tsd_destroy(&zfs_dbuf_evict_key); 656 657 mutex_destroy(&dbuf_evict_lock); 658 cv_destroy(&dbuf_evict_cv); 659 660 refcount_destroy(&dbuf_cache_size); 661 multilist_destroy(dbuf_cache); 662 } 663 664 /* 665 * Other stuff. 
666 */ 667 668 #ifdef ZFS_DEBUG 669 static void 670 dbuf_verify(dmu_buf_impl_t *db) 671 { 672 dnode_t *dn; 673 dbuf_dirty_record_t *dr; 674 675 ASSERT(MUTEX_HELD(&db->db_mtx)); 676 677 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 678 return; 679 680 ASSERT(db->db_objset != NULL); 681 DB_DNODE_ENTER(db); 682 dn = DB_DNODE(db); 683 if (dn == NULL) { 684 ASSERT(db->db_parent == NULL); 685 ASSERT(db->db_blkptr == NULL); 686 } else { 687 ASSERT3U(db->db.db_object, ==, dn->dn_object); 688 ASSERT3P(db->db_objset, ==, dn->dn_objset); 689 ASSERT3U(db->db_level, <, dn->dn_nlevels); 690 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 691 db->db_blkid == DMU_SPILL_BLKID || 692 !avl_is_empty(&dn->dn_dbufs)); 693 } 694 if (db->db_blkid == DMU_BONUS_BLKID) { 695 ASSERT(dn != NULL); 696 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 697 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 698 } else if (db->db_blkid == DMU_SPILL_BLKID) { 699 ASSERT(dn != NULL); 700 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 701 ASSERT0(db->db.db_offset); 702 } else { 703 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 704 } 705 706 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next) 707 ASSERT(dr->dr_dbuf == db); 708 709 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next) 710 ASSERT(dr->dr_dbuf == db); 711 712 /* 713 * We can't assert that db_size matches dn_datablksz because it 714 * can be momentarily different when another thread is doing 715 * dnode_set_blksz(). 716 */ 717 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 718 dr = db->db_data_pending; 719 /* 720 * It should only be modified in syncing context, so 721 * make sure we only have one copy of the data. 722 */ 723 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 724 } 725 726 /* verify db->db_blkptr */ 727 if (db->db_blkptr) { 728 if (db->db_parent == dn->dn_dbuf) { 729 /* db is pointed to by the dnode */ 730 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 731 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 732 ASSERT(db->db_parent == NULL); 733 else 734 ASSERT(db->db_parent != NULL); 735 if (db->db_blkid != DMU_SPILL_BLKID) 736 ASSERT3P(db->db_blkptr, ==, 737 &dn->dn_phys->dn_blkptr[db->db_blkid]); 738 } else { 739 /* db is pointed to by an indirect block */ 740 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 741 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 742 ASSERT3U(db->db_parent->db.db_object, ==, 743 db->db.db_object); 744 /* 745 * dnode_grow_indblksz() can make this fail if we don't 746 * have the struct_rwlock. XXX indblksz no longer 747 * grows. safe to do this now? 748 */ 749 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 750 ASSERT3P(db->db_blkptr, ==, 751 ((blkptr_t *)db->db_parent->db.db_data + 752 db->db_blkid % epb)); 753 } 754 } 755 } 756 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 757 (db->db_buf == NULL || db->db_buf->b_data) && 758 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 759 db->db_state != DB_FILL && !dn->dn_free_txg) { 760 /* 761 * If the blkptr isn't set but they have nonzero data, 762 * it had better be dirty, otherwise we'll lose that 763 * data when we evict this buffer. 764 * 765 * There is an exception to this rule for indirect blocks; in 766 * this case, if the indirect block is a hole, we fill in a few 767 * fields on each of the child blocks (importantly, birth time) 768 * to prevent hole birth times from being lost when you 769 * partially fill in a hole. 
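 * (dbuf_read_impl() is where those fields -- the logical size, type, level
 * and birth time of each child block pointer -- are filled in when such a
 * hole is read.)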
770 */ 771 if (db->db_dirtycnt == 0) { 772 if (db->db_level == 0) { 773 uint64_t *buf = db->db.db_data; 774 int i; 775 776 for (i = 0; i < db->db.db_size >> 3; i++) { 777 ASSERT(buf[i] == 0); 778 } 779 } else { 780 blkptr_t *bps = db->db.db_data; 781 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 782 db->db.db_size); 783 /* 784 * We want to verify that all the blkptrs in the 785 * indirect block are holes, but we may have 786 * automatically set up a few fields for them. 787 * We iterate through each blkptr and verify 788 * they only have those fields set. 789 */ 790 for (int i = 0; 791 i < db->db.db_size / sizeof (blkptr_t); 792 i++) { 793 blkptr_t *bp = &bps[i]; 794 ASSERT(ZIO_CHECKSUM_IS_ZERO( 795 &bp->blk_cksum)); 796 ASSERT( 797 DVA_IS_EMPTY(&bp->blk_dva[0]) && 798 DVA_IS_EMPTY(&bp->blk_dva[1]) && 799 DVA_IS_EMPTY(&bp->blk_dva[2])); 800 ASSERT0(bp->blk_fill); 801 ASSERT0(bp->blk_pad[0]); 802 ASSERT0(bp->blk_pad[1]); 803 ASSERT(!BP_IS_EMBEDDED(bp)); 804 ASSERT(BP_IS_HOLE(bp)); 805 ASSERT0(bp->blk_phys_birth); 806 } 807 } 808 } 809 } 810 DB_DNODE_EXIT(db); 811 } 812 #endif 813 814 static void 815 dbuf_clear_data(dmu_buf_impl_t *db) 816 { 817 ASSERT(MUTEX_HELD(&db->db_mtx)); 818 dbuf_evict_user(db); 819 ASSERT3P(db->db_buf, ==, NULL); 820 db->db.db_data = NULL; 821 if (db->db_state != DB_NOFILL) 822 db->db_state = DB_UNCACHED; 823 } 824 825 static void 826 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 827 { 828 ASSERT(MUTEX_HELD(&db->db_mtx)); 829 ASSERT(buf != NULL); 830 831 db->db_buf = buf; 832 ASSERT(buf->b_data != NULL); 833 db->db.db_data = buf->b_data; 834 } 835 836 /* 837 * Loan out an arc_buf for read. Return the loaned arc_buf. 838 */ 839 arc_buf_t * 840 dbuf_loan_arcbuf(dmu_buf_impl_t *db) 841 { 842 arc_buf_t *abuf; 843 844 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 845 mutex_enter(&db->db_mtx); 846 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) { 847 int blksz = db->db.db_size; 848 spa_t *spa = db->db_objset->os_spa; 849 850 mutex_exit(&db->db_mtx); 851 abuf = arc_loan_buf(spa, B_FALSE, blksz); 852 bcopy(db->db.db_data, abuf->b_data, blksz); 853 } else { 854 abuf = db->db_buf; 855 arc_loan_inuse_buf(abuf, db); 856 db->db_buf = NULL; 857 dbuf_clear_data(db); 858 mutex_exit(&db->db_mtx); 859 } 860 return (abuf); 861 } 862 863 /* 864 * Calculate which level n block references the data at the level 0 offset 865 * provided. 866 */ 867 uint64_t 868 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset) 869 { 870 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 871 /* 872 * The level n blkid is equal to the level 0 blkid divided by 873 * the number of level 0s in a level n block. 874 * 875 * The level 0 blkid is offset >> datablkshift = 876 * offset / 2^datablkshift. 877 * 878 * The number of level 0s in a level n is the number of block 879 * pointers in an indirect block, raised to the power of level. 880 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 881 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 
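 * For example, assuming 16K indirect blocks (indblkshift = 14) and 128-byte
 * block pointers (SPA_BLKPTRSHIFT = 7), each indirect block holds
 * 2^7 = 128 block pointers, so a level-2 block covers 128^2 = 16384
 * level-0 blocks.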
882 * 883 * Thus, the level n blkid is: offset / 884 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT))) 885 * = offset / 2^(datablkshift + level * 886 * (indblkshift - SPA_BLKPTRSHIFT)) 887 * = offset >> (datablkshift + level * 888 * (indblkshift - SPA_BLKPTRSHIFT)) 889 */ 890 return (offset >> (dn->dn_datablkshift + level * 891 (dn->dn_indblkshift - SPA_BLKPTRSHIFT))); 892 } else { 893 ASSERT3U(offset, <, dn->dn_datablksz); 894 return (0); 895 } 896 } 897 898 static void 899 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) 900 { 901 dmu_buf_impl_t *db = vdb; 902 903 mutex_enter(&db->db_mtx); 904 ASSERT3U(db->db_state, ==, DB_READ); 905 /* 906 * All reads are synchronous, so we must have a hold on the dbuf 907 */ 908 ASSERT(refcount_count(&db->db_holds) > 0); 909 ASSERT(db->db_buf == NULL); 910 ASSERT(db->db.db_data == NULL); 911 if (db->db_level == 0 && db->db_freed_in_flight) { 912 /* we were freed in flight; disregard any error */ 913 arc_release(buf, db); 914 bzero(buf->b_data, db->db.db_size); 915 arc_buf_freeze(buf); 916 db->db_freed_in_flight = FALSE; 917 dbuf_set_data(db, buf); 918 db->db_state = DB_CACHED; 919 } else if (zio == NULL || zio->io_error == 0) { 920 dbuf_set_data(db, buf); 921 db->db_state = DB_CACHED; 922 } else { 923 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 924 ASSERT3P(db->db_buf, ==, NULL); 925 arc_buf_destroy(buf, db); 926 db->db_state = DB_UNCACHED; 927 } 928 cv_broadcast(&db->db_changed); 929 dbuf_rele_and_unlock(db, NULL); 930 } 931 932 static void 933 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 934 { 935 dnode_t *dn; 936 zbookmark_phys_t zb; 937 arc_flags_t aflags = ARC_FLAG_NOWAIT; 938 939 DB_DNODE_ENTER(db); 940 dn = DB_DNODE(db); 941 ASSERT(!refcount_is_zero(&db->db_holds)); 942 /* We need the struct_rwlock to prevent db_blkptr from changing. */ 943 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 944 ASSERT(MUTEX_HELD(&db->db_mtx)); 945 ASSERT(db->db_state == DB_UNCACHED); 946 ASSERT(db->db_buf == NULL); 947 948 if (db->db_blkid == DMU_BONUS_BLKID) { 949 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 950 951 ASSERT3U(bonuslen, <=, db->db.db_size); 952 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); 953 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 954 if (bonuslen < DN_MAX_BONUSLEN) 955 bzero(db->db.db_data, DN_MAX_BONUSLEN); 956 if (bonuslen) 957 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 958 DB_DNODE_EXIT(db); 959 db->db_state = DB_CACHED; 960 mutex_exit(&db->db_mtx); 961 return; 962 } 963 964 /* 965 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 966 * processes the delete record and clears the bp while we are waiting 967 * for the dn_mtx (resulting in a "no" from block_freed). 968 */ 969 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 970 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 971 BP_IS_HOLE(db->db_blkptr)))) { 972 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 973 974 dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 975 db->db.db_size)); 976 bzero(db->db.db_data, db->db.db_size); 977 978 if (db->db_blkptr != NULL && db->db_level > 0 && 979 BP_IS_HOLE(db->db_blkptr) && 980 db->db_blkptr->blk_birth != 0) { 981 blkptr_t *bps = db->db.db_data; 982 for (int i = 0; i < ((1 << 983 DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 984 i++) { 985 blkptr_t *bp = &bps[i]; 986 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 987 1 << dn->dn_indblkshift); 988 BP_SET_LSIZE(bp, 989 BP_GET_LEVEL(db->db_blkptr) == 1 ? 
990 dn->dn_datablksz : 991 BP_GET_LSIZE(db->db_blkptr)); 992 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 993 BP_SET_LEVEL(bp, 994 BP_GET_LEVEL(db->db_blkptr) - 1); 995 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 996 } 997 } 998 DB_DNODE_EXIT(db); 999 db->db_state = DB_CACHED; 1000 mutex_exit(&db->db_mtx); 1001 return; 1002 } 1003 1004 DB_DNODE_EXIT(db); 1005 1006 db->db_state = DB_READ; 1007 mutex_exit(&db->db_mtx); 1008 1009 if (DBUF_IS_L2CACHEABLE(db)) 1010 aflags |= ARC_FLAG_L2CACHE; 1011 1012 SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ? 1013 db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET, 1014 db->db.db_object, db->db_level, db->db_blkid); 1015 1016 dbuf_add_ref(db, NULL); 1017 1018 (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr, 1019 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 1020 (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 1021 &aflags, &zb); 1022 } 1023 1024 /* 1025 * This is our just-in-time copy function. It makes a copy of buffers that 1026 * have been modified in a previous transaction group before we access them in 1027 * the current active group. 1028 * 1029 * This function is used in three places: when we are dirtying a buffer for the 1030 * first time in a txg, when we are freeing a range in a dnode that includes 1031 * this buffer, and when we are accessing a buffer which was received compressed 1032 * and later referenced in a WRITE_BYREF record. 1033 * 1034 * Note that when we are called from dbuf_free_range() we do not put a hold on 1035 * the buffer, we just traverse the active dbuf list for the dnode. 1036 */ 1037 static void 1038 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 1039 { 1040 dbuf_dirty_record_t *dr = db->db_last_dirty; 1041 1042 ASSERT(MUTEX_HELD(&db->db_mtx)); 1043 ASSERT(db->db.db_data != NULL); 1044 ASSERT(db->db_level == 0); 1045 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 1046 1047 if (dr == NULL || 1048 (dr->dt.dl.dr_data != 1049 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf))) 1050 return; 1051 1052 /* 1053 * If the last dirty record for this dbuf has not yet synced 1054 * and it's referencing the dbuf data, either: 1055 * reset the reference to point to a new copy, 1056 * or (if there are no active holders) 1057 * just null out the current db_data pointer.
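 * (Bonus buffers are copied into a freshly allocated zio buf; other buffers
 * get a new arc buf that preserves any compression of the original, as the
 * code below shows.)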
1058 */ 1059 ASSERT(dr->dr_txg >= txg - 2); 1060 if (db->db_blkid == DMU_BONUS_BLKID) { 1061 /* Note that the data bufs here are zio_bufs */ 1062 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN); 1063 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 1064 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN); 1065 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 1066 int size = arc_buf_size(db->db_buf); 1067 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1068 spa_t *spa = db->db_objset->os_spa; 1069 enum zio_compress compress_type = 1070 arc_get_compression(db->db_buf); 1071 1072 if (compress_type == ZIO_COMPRESS_OFF) { 1073 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); 1074 } else { 1075 ASSERT3U(type, ==, ARC_BUFC_DATA); 1076 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, 1077 size, arc_buf_lsize(db->db_buf), compress_type); 1078 } 1079 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); 1080 } else { 1081 db->db_buf = NULL; 1082 dbuf_clear_data(db); 1083 } 1084 } 1085 1086 int 1087 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1088 { 1089 int err = 0; 1090 boolean_t prefetch; 1091 dnode_t *dn; 1092 1093 /* 1094 * We don't have to hold the mutex to check db_state because it 1095 * can't be freed while we have a hold on the buffer. 1096 */ 1097 ASSERT(!refcount_is_zero(&db->db_holds)); 1098 1099 if (db->db_state == DB_NOFILL) 1100 return (SET_ERROR(EIO)); 1101 1102 DB_DNODE_ENTER(db); 1103 dn = DB_DNODE(db); 1104 if ((flags & DB_RF_HAVESTRUCT) == 0) 1105 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1106 1107 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1108 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL && 1109 DBUF_IS_CACHEABLE(db); 1110 1111 mutex_enter(&db->db_mtx); 1112 if (db->db_state == DB_CACHED) { 1113 /* 1114 * If the arc buf is compressed, we need to decompress it to 1115 * read the data. This could happen during the "zfs receive" of 1116 * a stream which is compressed and deduplicated. 1117 */ 1118 if (db->db_buf != NULL && 1119 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) { 1120 dbuf_fix_old_data(db, 1121 spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1122 err = arc_decompress(db->db_buf); 1123 dbuf_set_data(db, db->db_buf); 1124 } 1125 mutex_exit(&db->db_mtx); 1126 if (prefetch) 1127 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1128 if ((flags & DB_RF_HAVESTRUCT) == 0) 1129 rw_exit(&dn->dn_struct_rwlock); 1130 DB_DNODE_EXIT(db); 1131 } else if (db->db_state == DB_UNCACHED) { 1132 spa_t *spa = dn->dn_objset->os_spa; 1133 boolean_t need_wait = B_FALSE; 1134 1135 if (zio == NULL && 1136 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1137 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1138 need_wait = B_TRUE; 1139 } 1140 dbuf_read_impl(db, zio, flags); 1141 1142 /* dbuf_read_impl has dropped db_mtx for us */ 1143 1144 if (prefetch) 1145 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1146 1147 if ((flags & DB_RF_HAVESTRUCT) == 0) 1148 rw_exit(&dn->dn_struct_rwlock); 1149 DB_DNODE_EXIT(db); 1150 1151 if (need_wait) 1152 err = zio_wait(zio); 1153 } else { 1154 /* 1155 * Another reader came in while the dbuf was in flight 1156 * between UNCACHED and CACHED. Either a writer will finish 1157 * writing the buffer (sending the dbuf to CACHED) or the 1158 * first reader's request will reach the read_done callback 1159 * and send the dbuf to CACHED. Otherwise, a failure 1160 * occurred and the dbuf went to UNCACHED. 
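 * Unless the caller passed DB_RF_NEVERWAIT, we wait below for the state to
 * settle and return EIO if the dbuf ends up UNCACHED.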
1161 */ 1162 mutex_exit(&db->db_mtx); 1163 if (prefetch) 1164 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1165 if ((flags & DB_RF_HAVESTRUCT) == 0) 1166 rw_exit(&dn->dn_struct_rwlock); 1167 DB_DNODE_EXIT(db); 1168 1169 /* Skip the wait per the caller's request. */ 1170 mutex_enter(&db->db_mtx); 1171 if ((flags & DB_RF_NEVERWAIT) == 0) { 1172 while (db->db_state == DB_READ || 1173 db->db_state == DB_FILL) { 1174 ASSERT(db->db_state == DB_READ || 1175 (flags & DB_RF_HAVESTRUCT) == 0); 1176 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1177 db, zio_t *, zio); 1178 cv_wait(&db->db_changed, &db->db_mtx); 1179 } 1180 if (db->db_state == DB_UNCACHED) 1181 err = SET_ERROR(EIO); 1182 } 1183 mutex_exit(&db->db_mtx); 1184 } 1185 1186 return (err); 1187 } 1188 1189 static void 1190 dbuf_noread(dmu_buf_impl_t *db) 1191 { 1192 ASSERT(!refcount_is_zero(&db->db_holds)); 1193 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1194 mutex_enter(&db->db_mtx); 1195 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1196 cv_wait(&db->db_changed, &db->db_mtx); 1197 if (db->db_state == DB_UNCACHED) { 1198 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1199 spa_t *spa = db->db_objset->os_spa; 1200 1201 ASSERT(db->db_buf == NULL); 1202 ASSERT(db->db.db_data == NULL); 1203 dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1204 db->db_state = DB_FILL; 1205 } else if (db->db_state == DB_NOFILL) { 1206 dbuf_clear_data(db); 1207 } else { 1208 ASSERT3U(db->db_state, ==, DB_CACHED); 1209 } 1210 mutex_exit(&db->db_mtx); 1211 } 1212 1213 void 1214 dbuf_unoverride(dbuf_dirty_record_t *dr) 1215 { 1216 dmu_buf_impl_t *db = dr->dr_dbuf; 1217 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1218 uint64_t txg = dr->dr_txg; 1219 1220 ASSERT(MUTEX_HELD(&db->db_mtx)); 1221 /* 1222 * This assert is valid because dmu_sync() expects to be called by 1223 * a zilog's get_data while holding a range lock. This call only 1224 * comes from dbuf_dirty() callers who must also hold a range lock. 1225 */ 1226 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1227 ASSERT(db->db_level == 0); 1228 1229 if (db->db_blkid == DMU_BONUS_BLKID || 1230 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1231 return; 1232 1233 ASSERT(db->db_data_pending != dr); 1234 1235 /* free this block */ 1236 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 1237 zio_free(db->db_objset->os_spa, txg, bp); 1238 1239 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1240 dr->dt.dl.dr_nopwrite = B_FALSE; 1241 1242 /* 1243 * Release the already-written buffer, so we leave it in 1244 * a consistent dirty state. Note that all callers are 1245 * modifying the buffer, so they will immediately do 1246 * another (redundant) arc_release(). Therefore, leave 1247 * the buf thawed to save the effort of freezing & 1248 * immediately re-thawing it. 1249 */ 1250 arc_release(dr->dt.dl.dr_data, db); 1251 } 1252 1253 /* 1254 * Evict (if its unreferenced) or clear (if its referenced) any level-0 1255 * data blocks in the free range, so that any future readers will find 1256 * empty blocks. 
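 * Buffers that are dirty in the current txg have dbuf_unoverride() applied
 * and may bump the dnode's dn_maxblkid, since they can still contain new
 * data when we sync.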
1257 */ 1258 void 1259 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 1260 dmu_tx_t *tx) 1261 { 1262 dmu_buf_impl_t db_search; 1263 dmu_buf_impl_t *db, *db_next; 1264 uint64_t txg = tx->tx_txg; 1265 avl_index_t where; 1266 1267 if (end_blkid > dn->dn_maxblkid && 1268 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 1269 end_blkid = dn->dn_maxblkid; 1270 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 1271 1272 db_search.db_level = 0; 1273 db_search.db_blkid = start_blkid; 1274 db_search.db_state = DB_SEARCH; 1275 1276 mutex_enter(&dn->dn_dbufs_mtx); 1277 db = avl_find(&dn->dn_dbufs, &db_search, &where); 1278 ASSERT3P(db, ==, NULL); 1279 1280 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 1281 1282 for (; db != NULL; db = db_next) { 1283 db_next = AVL_NEXT(&dn->dn_dbufs, db); 1284 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1285 1286 if (db->db_level != 0 || db->db_blkid > end_blkid) { 1287 break; 1288 } 1289 ASSERT3U(db->db_blkid, >=, start_blkid); 1290 1291 /* found a level 0 buffer in the range */ 1292 mutex_enter(&db->db_mtx); 1293 if (dbuf_undirty(db, tx)) { 1294 /* mutex has been dropped and dbuf destroyed */ 1295 continue; 1296 } 1297 1298 if (db->db_state == DB_UNCACHED || 1299 db->db_state == DB_NOFILL || 1300 db->db_state == DB_EVICTING) { 1301 ASSERT(db->db.db_data == NULL); 1302 mutex_exit(&db->db_mtx); 1303 continue; 1304 } 1305 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1306 /* will be handled in dbuf_read_done or dbuf_rele */ 1307 db->db_freed_in_flight = TRUE; 1308 mutex_exit(&db->db_mtx); 1309 continue; 1310 } 1311 if (refcount_count(&db->db_holds) == 0) { 1312 ASSERT(db->db_buf); 1313 dbuf_destroy(db); 1314 continue; 1315 } 1316 /* The dbuf is referenced */ 1317 1318 if (db->db_last_dirty != NULL) { 1319 dbuf_dirty_record_t *dr = db->db_last_dirty; 1320 1321 if (dr->dr_txg == txg) { 1322 /* 1323 * This buffer is "in-use", re-adjust the file 1324 * size to reflect that this buffer may 1325 * contain new data when we sync. 1326 */ 1327 if (db->db_blkid != DMU_SPILL_BLKID && 1328 db->db_blkid > dn->dn_maxblkid) 1329 dn->dn_maxblkid = db->db_blkid; 1330 dbuf_unoverride(dr); 1331 } else { 1332 /* 1333 * This dbuf is not dirty in the open context. 1334 * Either uncache it (if its not referenced in 1335 * the open context) or reset its contents to 1336 * empty. 1337 */ 1338 dbuf_fix_old_data(db, txg); 1339 } 1340 } 1341 /* clear the contents if its cached */ 1342 if (db->db_state == DB_CACHED) { 1343 ASSERT(db->db.db_data != NULL); 1344 arc_release(db->db_buf, db); 1345 bzero(db->db.db_data, db->db.db_size); 1346 arc_buf_freeze(db->db_buf); 1347 } 1348 1349 mutex_exit(&db->db_mtx); 1350 } 1351 mutex_exit(&dn->dn_dbufs_mtx); 1352 } 1353 1354 void 1355 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1356 { 1357 arc_buf_t *buf, *obuf; 1358 int osize = db->db.db_size; 1359 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1360 dnode_t *dn; 1361 1362 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1363 1364 DB_DNODE_ENTER(db); 1365 dn = DB_DNODE(db); 1366 1367 /* XXX does *this* func really need the lock? */ 1368 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1369 1370 /* 1371 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held 1372 * is OK, because there can be no other references to the db 1373 * when we are changing its size, so no concurrent DB_FILL can 1374 * be happening. 
1375 */ 1376 /* 1377 * XXX we should be doing a dbuf_read, checking the return 1378 * value and returning that up to our callers 1379 */ 1380 dmu_buf_will_dirty(&db->db, tx); 1381 1382 /* create the data buffer for the new block */ 1383 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1384 1385 /* copy old block data to the new block */ 1386 obuf = db->db_buf; 1387 bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1388 /* zero the remainder */ 1389 if (size > osize) 1390 bzero((uint8_t *)buf->b_data + osize, size - osize); 1391 1392 mutex_enter(&db->db_mtx); 1393 dbuf_set_data(db, buf); 1394 arc_buf_destroy(obuf, db); 1395 db->db.db_size = size; 1396 1397 if (db->db_level == 0) { 1398 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1399 db->db_last_dirty->dt.dl.dr_data = buf; 1400 } 1401 mutex_exit(&db->db_mtx); 1402 1403 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1404 DB_DNODE_EXIT(db); 1405 } 1406 1407 void 1408 dbuf_release_bp(dmu_buf_impl_t *db) 1409 { 1410 objset_t *os = db->db_objset; 1411 1412 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 1413 ASSERT(arc_released(os->os_phys_buf) || 1414 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 1415 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 1416 1417 (void) arc_release(db->db_buf, db); 1418 } 1419 1420 /* 1421 * We already have a dirty record for this TXG, and we are being 1422 * dirtied again. 1423 */ 1424 static void 1425 dbuf_redirty(dbuf_dirty_record_t *dr) 1426 { 1427 dmu_buf_impl_t *db = dr->dr_dbuf; 1428 1429 ASSERT(MUTEX_HELD(&db->db_mtx)); 1430 1431 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 1432 /* 1433 * If this buffer has already been written out, 1434 * we now need to reset its state. 1435 */ 1436 dbuf_unoverride(dr); 1437 if (db->db.db_object != DMU_META_DNODE_OBJECT && 1438 db->db_state != DB_NOFILL) { 1439 /* Already released on initial dirty, so just thaw. */ 1440 ASSERT(arc_released(db->db_buf)); 1441 arc_buf_thaw(db->db_buf); 1442 } 1443 } 1444 } 1445 1446 dbuf_dirty_record_t * 1447 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1448 { 1449 dnode_t *dn; 1450 objset_t *os; 1451 dbuf_dirty_record_t **drp, *dr; 1452 int drop_struct_lock = FALSE; 1453 int txgoff = tx->tx_txg & TXG_MASK; 1454 1455 ASSERT(tx->tx_txg != 0); 1456 ASSERT(!refcount_is_zero(&db->db_holds)); 1457 DMU_TX_DIRTY_BUF(tx, db); 1458 1459 DB_DNODE_ENTER(db); 1460 dn = DB_DNODE(db); 1461 /* 1462 * Shouldn't dirty a regular buffer in syncing context. Private 1463 * objects may be dirtied in syncing context, but only if they 1464 * were already pre-dirtied in open context. 1465 */ 1466 #ifdef DEBUG 1467 if (dn->dn_objset->os_dsl_dataset != NULL) { 1468 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1469 RW_READER, FTAG); 1470 } 1471 ASSERT(!dmu_tx_is_syncing(tx) || 1472 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 1473 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1474 dn->dn_objset->os_dsl_dataset == NULL); 1475 if (dn->dn_objset->os_dsl_dataset != NULL) 1476 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1477 #endif 1478 /* 1479 * We make this assert for private objects as well, but after we 1480 * check if we're already dirty. They are allowed to re-dirty 1481 * in syncing context. 1482 */ 1483 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1484 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1485 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1486 1487 mutex_enter(&db->db_mtx); 1488 /* 1489 * XXX make this true for indirects too? 
The problem is that 1490 * transactions created with dmu_tx_create_assigned() from 1491 * syncing context don't bother holding ahead. 1492 */ 1493 ASSERT(db->db_level != 0 || 1494 db->db_state == DB_CACHED || db->db_state == DB_FILL || 1495 db->db_state == DB_NOFILL); 1496 1497 mutex_enter(&dn->dn_mtx); 1498 /* 1499 * Don't set dirtyctx to SYNC if we're just modifying this as we 1500 * initialize the objset. 1501 */ 1502 if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1503 if (dn->dn_objset->os_dsl_dataset != NULL) { 1504 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1505 RW_READER, FTAG); 1506 } 1507 if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1508 dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 1509 DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1510 ASSERT(dn->dn_dirtyctx_firstset == NULL); 1511 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1512 } 1513 if (dn->dn_objset->os_dsl_dataset != NULL) { 1514 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1515 FTAG); 1516 } 1517 } 1518 mutex_exit(&dn->dn_mtx); 1519 1520 if (db->db_blkid == DMU_SPILL_BLKID) 1521 dn->dn_have_spill = B_TRUE; 1522 1523 /* 1524 * If this buffer is already dirty, we're done. 1525 */ 1526 drp = &db->db_last_dirty; 1527 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1528 db->db.db_object == DMU_META_DNODE_OBJECT); 1529 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 1530 drp = &dr->dr_next; 1531 if (dr && dr->dr_txg == tx->tx_txg) { 1532 DB_DNODE_EXIT(db); 1533 1534 dbuf_redirty(dr); 1535 mutex_exit(&db->db_mtx); 1536 return (dr); 1537 } 1538 1539 /* 1540 * Only valid if not already dirty. 1541 */ 1542 ASSERT(dn->dn_object == 0 || 1543 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1544 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1545 1546 ASSERT3U(dn->dn_nlevels, >, db->db_level); 1547 1548 /* 1549 * We should only be dirtying in syncing context if it's the 1550 * mos or we're initializing the os or it's a special object. 1551 * However, we are allowed to dirty in syncing context provided 1552 * we already dirtied it in open context. Hence we must make 1553 * this assertion only if we're not already dirty. 1554 */ 1555 os = dn->dn_objset; 1556 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1557 #ifdef DEBUG 1558 if (dn->dn_objset->os_dsl_dataset != NULL) 1559 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 1560 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1561 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1562 if (dn->dn_objset->os_dsl_dataset != NULL) 1563 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1564 #endif 1565 ASSERT(db->db.db_size != 0); 1566 1567 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1568 1569 if (db->db_blkid != DMU_BONUS_BLKID) { 1570 dmu_objset_willuse_space(os, db->db.db_size, tx); 1571 } 1572 1573 /* 1574 * If this buffer is dirty in an old transaction group we need 1575 * to make a copy of it so that the changes we make in this 1576 * transaction group won't leak out when we sync the older txg. 
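 * For level-0 buffers this is handled by dbuf_fix_old_data() below, which
 * points the older dirty record at a private copy of the data before we
 * modify the live buffer.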
1577 */ 1578 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1579 if (db->db_level == 0) { 1580 void *data_old = db->db_buf; 1581 1582 if (db->db_state != DB_NOFILL) { 1583 if (db->db_blkid == DMU_BONUS_BLKID) { 1584 dbuf_fix_old_data(db, tx->tx_txg); 1585 data_old = db->db.db_data; 1586 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1587 /* 1588 * Release the data buffer from the cache so 1589 * that we can modify it without impacting 1590 * possible other users of this cached data 1591 * block. Note that indirect blocks and 1592 * private objects are not released until the 1593 * syncing state (since they are only modified 1594 * then). 1595 */ 1596 arc_release(db->db_buf, db); 1597 dbuf_fix_old_data(db, tx->tx_txg); 1598 data_old = db->db_buf; 1599 } 1600 ASSERT(data_old != NULL); 1601 } 1602 dr->dt.dl.dr_data = data_old; 1603 } else { 1604 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1605 list_create(&dr->dt.di.dr_children, 1606 sizeof (dbuf_dirty_record_t), 1607 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1608 } 1609 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 1610 dr->dr_accounted = db->db.db_size; 1611 dr->dr_dbuf = db; 1612 dr->dr_txg = tx->tx_txg; 1613 dr->dr_next = *drp; 1614 *drp = dr; 1615 1616 /* 1617 * We could have been freed_in_flight between the dbuf_noread 1618 * and dbuf_dirty. We win, as though the dbuf_noread() had 1619 * happened after the free. 1620 */ 1621 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1622 db->db_blkid != DMU_SPILL_BLKID) { 1623 mutex_enter(&dn->dn_mtx); 1624 if (dn->dn_free_ranges[txgoff] != NULL) { 1625 range_tree_clear(dn->dn_free_ranges[txgoff], 1626 db->db_blkid, 1); 1627 } 1628 mutex_exit(&dn->dn_mtx); 1629 db->db_freed_in_flight = FALSE; 1630 } 1631 1632 /* 1633 * This buffer is now part of this txg 1634 */ 1635 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1636 db->db_dirtycnt += 1; 1637 ASSERT3U(db->db_dirtycnt, <=, 3); 1638 1639 mutex_exit(&db->db_mtx); 1640 1641 if (db->db_blkid == DMU_BONUS_BLKID || 1642 db->db_blkid == DMU_SPILL_BLKID) { 1643 mutex_enter(&dn->dn_mtx); 1644 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1645 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1646 mutex_exit(&dn->dn_mtx); 1647 dnode_setdirty(dn, tx); 1648 DB_DNODE_EXIT(db); 1649 return (dr); 1650 } 1651 1652 /* 1653 * The dn_struct_rwlock prevents db_blkptr from changing 1654 * due to a write from syncing context completing 1655 * while we are running, so we want to acquire it before 1656 * looking at db_blkptr. 1657 */ 1658 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 1659 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1660 drop_struct_lock = TRUE; 1661 } 1662 1663 /* 1664 * We need to hold the dn_struct_rwlock to make this assertion, 1665 * because it protects dn_phys / dn_next_nlevels from changing. 1666 */ 1667 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1668 dn->dn_phys->dn_nlevels > db->db_level || 1669 dn->dn_next_nlevels[txgoff] > db->db_level || 1670 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1671 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1672 1673 /* 1674 * If we are overwriting a dedup BP, then unless it is snapshotted, 1675 * when we get to syncing context we will need to decrement its 1676 * refcount in the DDT. Prefetch the relevant DDT block so that 1677 * syncing context won't have to wait for the i/o. 
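 * (ddt_prefetch() is expected to be a no-op when the block pointer is not
 * a dedup BP, so this is cheap in the common case.)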
1678 */ 1679 ddt_prefetch(os->os_spa, db->db_blkptr); 1680 1681 if (db->db_level == 0) { 1682 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 1683 ASSERT(dn->dn_maxblkid >= db->db_blkid); 1684 } 1685 1686 if (db->db_level+1 < dn->dn_nlevels) { 1687 dmu_buf_impl_t *parent = db->db_parent; 1688 dbuf_dirty_record_t *di; 1689 int parent_held = FALSE; 1690 1691 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1692 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1693 1694 parent = dbuf_hold_level(dn, db->db_level+1, 1695 db->db_blkid >> epbs, FTAG); 1696 ASSERT(parent != NULL); 1697 parent_held = TRUE; 1698 } 1699 if (drop_struct_lock) 1700 rw_exit(&dn->dn_struct_rwlock); 1701 ASSERT3U(db->db_level+1, ==, parent->db_level); 1702 di = dbuf_dirty(parent, tx); 1703 if (parent_held) 1704 dbuf_rele(parent, FTAG); 1705 1706 mutex_enter(&db->db_mtx); 1707 /* 1708 * Since we've dropped the mutex, it's possible that 1709 * dbuf_undirty() might have changed this out from under us. 1710 */ 1711 if (db->db_last_dirty == dr || 1712 dn->dn_object == DMU_META_DNODE_OBJECT) { 1713 mutex_enter(&di->dt.di.dr_mtx); 1714 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1715 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1716 list_insert_tail(&di->dt.di.dr_children, dr); 1717 mutex_exit(&di->dt.di.dr_mtx); 1718 dr->dr_parent = di; 1719 } 1720 mutex_exit(&db->db_mtx); 1721 } else { 1722 ASSERT(db->db_level+1 == dn->dn_nlevels); 1723 ASSERT(db->db_blkid < dn->dn_nblkptr); 1724 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1725 mutex_enter(&dn->dn_mtx); 1726 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1727 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1728 mutex_exit(&dn->dn_mtx); 1729 if (drop_struct_lock) 1730 rw_exit(&dn->dn_struct_rwlock); 1731 } 1732 1733 dnode_setdirty(dn, tx); 1734 DB_DNODE_EXIT(db); 1735 return (dr); 1736 } 1737 1738 /* 1739 * Undirty a buffer in the transaction group referenced by the given 1740 * transaction. Return whether this evicted the dbuf. 1741 */ 1742 static boolean_t 1743 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1744 { 1745 dnode_t *dn; 1746 uint64_t txg = tx->tx_txg; 1747 dbuf_dirty_record_t *dr, **drp; 1748 1749 ASSERT(txg != 0); 1750 1751 /* 1752 * Due to our use of dn_nlevels below, this can only be called 1753 * in open context, unless we are operating on the MOS. 1754 * From syncing context, dn_nlevels may be different from the 1755 * dn_nlevels used when dbuf was dirtied. 1756 */ 1757 ASSERT(db->db_objset == 1758 dmu_objset_pool(db->db_objset)->dp_meta_objset || 1759 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1760 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1761 ASSERT0(db->db_level); 1762 ASSERT(MUTEX_HELD(&db->db_mtx)); 1763 1764 /* 1765 * If this buffer is not dirty, we're done. 1766 */ 1767 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1768 if (dr->dr_txg <= txg) 1769 break; 1770 if (dr == NULL || dr->dr_txg < txg) 1771 return (B_FALSE); 1772 ASSERT(dr->dr_txg == txg); 1773 ASSERT(dr->dr_dbuf == db); 1774 1775 DB_DNODE_ENTER(db); 1776 dn = DB_DNODE(db); 1777 1778 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1779 1780 ASSERT(db->db.db_size != 0); 1781 1782 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 1783 dr->dr_accounted, txg); 1784 1785 *drp = dr->dr_next; 1786 1787 /* 1788 * Note that there are three places in dbuf_dirty() 1789 * where this dirty record may be put on a list. 
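 * (the dnode's dn_dirty_records list for bonus and spill buffers, a parent
 * dbuf's dr_children list, or dn_dirty_records again for a top-level block)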
1790 * Make sure to do a list_remove corresponding to 1791 * every one of those list_insert calls. 1792 */ 1793 if (dr->dr_parent) { 1794 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1795 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1796 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1797 } else if (db->db_blkid == DMU_SPILL_BLKID || 1798 db->db_level + 1 == dn->dn_nlevels) { 1799 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1800 mutex_enter(&dn->dn_mtx); 1801 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1802 mutex_exit(&dn->dn_mtx); 1803 } 1804 DB_DNODE_EXIT(db); 1805 1806 if (db->db_state != DB_NOFILL) { 1807 dbuf_unoverride(dr); 1808 1809 ASSERT(db->db_buf != NULL); 1810 ASSERT(dr->dt.dl.dr_data != NULL); 1811 if (dr->dt.dl.dr_data != db->db_buf) 1812 arc_buf_destroy(dr->dt.dl.dr_data, db); 1813 } 1814 1815 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1816 1817 ASSERT(db->db_dirtycnt > 0); 1818 db->db_dirtycnt -= 1; 1819 1820 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1821 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1822 dbuf_destroy(db); 1823 return (B_TRUE); 1824 } 1825 1826 return (B_FALSE); 1827 } 1828 1829 void 1830 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1831 { 1832 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1833 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1834 1835 ASSERT(tx->tx_txg != 0); 1836 ASSERT(!refcount_is_zero(&db->db_holds)); 1837 1838 /* 1839 * Quick check for dirtyness. For already dirty blocks, this 1840 * reduces runtime of this function by >90%, and overall performance 1841 * by 50% for some workloads (e.g. file deletion with indirect blocks 1842 * cached). 1843 */ 1844 mutex_enter(&db->db_mtx); 1845 dbuf_dirty_record_t *dr; 1846 for (dr = db->db_last_dirty; 1847 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 1848 /* 1849 * It's possible that it is already dirty but not cached, 1850 * because there are some calls to dbuf_dirty() that don't 1851 * go through dmu_buf_will_dirty(). 1852 */ 1853 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 1854 /* This dbuf is already dirty and cached. 
*/ 1855 dbuf_redirty(dr); 1856 mutex_exit(&db->db_mtx); 1857 return; 1858 } 1859 } 1860 mutex_exit(&db->db_mtx); 1861 1862 DB_DNODE_ENTER(db); 1863 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1864 rf |= DB_RF_HAVESTRUCT; 1865 DB_DNODE_EXIT(db); 1866 (void) dbuf_read(db, NULL, rf); 1867 (void) dbuf_dirty(db, tx); 1868 } 1869 1870 void 1871 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1872 { 1873 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1874 1875 db->db_state = DB_NOFILL; 1876 1877 dmu_buf_will_fill(db_fake, tx); 1878 } 1879 1880 void 1881 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1882 { 1883 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1884 1885 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1886 ASSERT(tx->tx_txg != 0); 1887 ASSERT(db->db_level == 0); 1888 ASSERT(!refcount_is_zero(&db->db_holds)); 1889 1890 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1891 dmu_tx_private_ok(tx)); 1892 1893 dbuf_noread(db); 1894 (void) dbuf_dirty(db, tx); 1895 } 1896 1897 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1898 /* ARGSUSED */ 1899 void 1900 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1901 { 1902 mutex_enter(&db->db_mtx); 1903 DBUF_VERIFY(db); 1904 1905 if (db->db_state == DB_FILL) { 1906 if (db->db_level == 0 && db->db_freed_in_flight) { 1907 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1908 /* we were freed while filling */ 1909 /* XXX dbuf_undirty? */ 1910 bzero(db->db.db_data, db->db.db_size); 1911 db->db_freed_in_flight = FALSE; 1912 } 1913 db->db_state = DB_CACHED; 1914 cv_broadcast(&db->db_changed); 1915 } 1916 mutex_exit(&db->db_mtx); 1917 } 1918 1919 void 1920 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 1921 bp_embedded_type_t etype, enum zio_compress comp, 1922 int uncompressed_size, int compressed_size, int byteorder, 1923 dmu_tx_t *tx) 1924 { 1925 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 1926 struct dirty_leaf *dl; 1927 dmu_object_type_t type; 1928 1929 if (etype == BP_EMBEDDED_TYPE_DATA) { 1930 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1931 SPA_FEATURE_EMBEDDED_DATA)); 1932 } 1933 1934 DB_DNODE_ENTER(db); 1935 type = DB_DNODE(db)->dn_type; 1936 DB_DNODE_EXIT(db); 1937 1938 ASSERT0(db->db_level); 1939 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1940 1941 dmu_buf_will_not_fill(dbuf, tx); 1942 1943 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1944 dl = &db->db_last_dirty->dt.dl; 1945 encode_embedded_bp_compressed(&dl->dr_overridden_by, 1946 data, comp, uncompressed_size, compressed_size); 1947 BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 1948 BP_SET_TYPE(&dl->dr_overridden_by, type); 1949 BP_SET_LEVEL(&dl->dr_overridden_by, 0); 1950 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 1951 1952 dl->dr_override_state = DR_OVERRIDDEN; 1953 dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 1954 } 1955 1956 /* 1957 * Directly assign a provided arc buf to a given dbuf if it's not referenced 1958 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
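 *
 * This is the zero-copy write path: a caller typically loans a buffer from
 * the ARC, fills it, and hands it off here (sketch only; error handling and
 * sizing elided):
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(&db->db, db->db.db_size);
 *	... copy the new contents into abuf->b_data ...
 *	dbuf_assign_arcbuf(db, abuf, tx);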
1959 */ 1960 void 1961 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 1962 { 1963 ASSERT(!refcount_is_zero(&db->db_holds)); 1964 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1965 ASSERT(db->db_level == 0); 1966 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 1967 ASSERT(buf != NULL); 1968 ASSERT(arc_buf_lsize(buf) == db->db.db_size); 1969 ASSERT(tx->tx_txg != 0); 1970 1971 arc_return_buf(buf, db); 1972 ASSERT(arc_released(buf)); 1973 1974 mutex_enter(&db->db_mtx); 1975 1976 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1977 cv_wait(&db->db_changed, &db->db_mtx); 1978 1979 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 1980 1981 if (db->db_state == DB_CACHED && 1982 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 1983 mutex_exit(&db->db_mtx); 1984 (void) dbuf_dirty(db, tx); 1985 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1986 arc_buf_destroy(buf, db); 1987 xuio_stat_wbuf_copied(); 1988 return; 1989 } 1990 1991 xuio_stat_wbuf_nocopy(); 1992 if (db->db_state == DB_CACHED) { 1993 dbuf_dirty_record_t *dr = db->db_last_dirty; 1994 1995 ASSERT(db->db_buf != NULL); 1996 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 1997 ASSERT(dr->dt.dl.dr_data == db->db_buf); 1998 if (!arc_released(db->db_buf)) { 1999 ASSERT(dr->dt.dl.dr_override_state == 2000 DR_OVERRIDDEN); 2001 arc_release(db->db_buf, db); 2002 } 2003 dr->dt.dl.dr_data = buf; 2004 arc_buf_destroy(db->db_buf, db); 2005 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 2006 arc_release(db->db_buf, db); 2007 arc_buf_destroy(db->db_buf, db); 2008 } 2009 db->db_buf = NULL; 2010 } 2011 ASSERT(db->db_buf == NULL); 2012 dbuf_set_data(db, buf); 2013 db->db_state = DB_FILL; 2014 mutex_exit(&db->db_mtx); 2015 (void) dbuf_dirty(db, tx); 2016 dmu_buf_fill_done(&db->db, tx); 2017 } 2018 2019 void 2020 dbuf_destroy(dmu_buf_impl_t *db) 2021 { 2022 dnode_t *dn; 2023 dmu_buf_impl_t *parent = db->db_parent; 2024 dmu_buf_impl_t *dndb; 2025 2026 ASSERT(MUTEX_HELD(&db->db_mtx)); 2027 ASSERT(refcount_is_zero(&db->db_holds)); 2028 2029 if (db->db_buf != NULL) { 2030 arc_buf_destroy(db->db_buf, db); 2031 db->db_buf = NULL; 2032 } 2033 2034 if (db->db_blkid == DMU_BONUS_BLKID) { 2035 ASSERT(db->db.db_data != NULL); 2036 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 2037 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2038 db->db_state = DB_UNCACHED; 2039 } 2040 2041 dbuf_clear_data(db); 2042 2043 if (multilist_link_active(&db->db_cache_link)) { 2044 multilist_remove(dbuf_cache, db); 2045 (void) refcount_remove_many(&dbuf_cache_size, 2046 db->db.db_size, db); 2047 } 2048 2049 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2050 ASSERT(db->db_data_pending == NULL); 2051 2052 db->db_state = DB_EVICTING; 2053 db->db_blkptr = NULL; 2054 2055 /* 2056 * Now that db_state is DB_EVICTING, nobody else can find this via 2057 * the hash table. We can now drop db_mtx, which allows us to 2058 * acquire the dn_dbufs_mtx. 
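 * (The dn_dbufs_mtx is taken before db_mtx elsewhere, e.g. in
 * dnode_evict_dbufs(), so acquiring it here while still holding db_mtx
 * could deadlock.)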
2059 */ 2060 mutex_exit(&db->db_mtx); 2061 2062 DB_DNODE_ENTER(db); 2063 dn = DB_DNODE(db); 2064 dndb = dn->dn_dbuf; 2065 if (db->db_blkid != DMU_BONUS_BLKID) { 2066 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2067 if (needlock) 2068 mutex_enter(&dn->dn_dbufs_mtx); 2069 avl_remove(&dn->dn_dbufs, db); 2070 atomic_dec_32(&dn->dn_dbufs_count); 2071 membar_producer(); 2072 DB_DNODE_EXIT(db); 2073 if (needlock) 2074 mutex_exit(&dn->dn_dbufs_mtx); 2075 /* 2076 * Decrementing the dbuf count means that the hold corresponding 2077 * to the removed dbuf is no longer discounted in dnode_move(), 2078 * so the dnode cannot be moved until after we release the hold. 2079 * The membar_producer() ensures visibility of the decremented 2080 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2081 * release any lock. 2082 */ 2083 dnode_rele(dn, db); 2084 db->db_dnode_handle = NULL; 2085 2086 dbuf_hash_remove(db); 2087 } else { 2088 DB_DNODE_EXIT(db); 2089 } 2090 2091 ASSERT(refcount_is_zero(&db->db_holds)); 2092 2093 db->db_parent = NULL; 2094 2095 ASSERT(db->db_buf == NULL); 2096 ASSERT(db->db.db_data == NULL); 2097 ASSERT(db->db_hash_next == NULL); 2098 ASSERT(db->db_blkptr == NULL); 2099 ASSERT(db->db_data_pending == NULL); 2100 ASSERT(!multilist_link_active(&db->db_cache_link)); 2101 2102 kmem_cache_free(dbuf_kmem_cache, db); 2103 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2104 2105 /* 2106 * If this dbuf is referenced from an indirect dbuf, 2107 * decrement the ref count on the indirect dbuf. 2108 */ 2109 if (parent && parent != dndb) 2110 dbuf_rele(parent, db); 2111 } 2112 2113 /* 2114 * Note: While bpp will always be updated if the function returns success, 2115 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2116 * this happens when the dnode is the meta-dnode, or a userused or groupused 2117 * object. 2118 */ 2119 static int 2120 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2121 dmu_buf_impl_t **parentp, blkptr_t **bpp) 2122 { 2123 *parentp = NULL; 2124 *bpp = NULL; 2125 2126 ASSERT(blkid != DMU_BONUS_BLKID); 2127 2128 if (blkid == DMU_SPILL_BLKID) { 2129 mutex_enter(&dn->dn_mtx); 2130 if (dn->dn_have_spill && 2131 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 2132 *bpp = &dn->dn_phys->dn_spill; 2133 else 2134 *bpp = NULL; 2135 dbuf_add_ref(dn->dn_dbuf, NULL); 2136 *parentp = dn->dn_dbuf; 2137 mutex_exit(&dn->dn_mtx); 2138 return (0); 2139 } 2140 2141 int nlevels = 2142 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 2143 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2144 2145 ASSERT3U(level * epbs, <, 64); 2146 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2147 /* 2148 * This assertion shouldn't trip as long as the max indirect block size 2149 * is less than 1M. The reason for this is that up to that point, 2150 * the number of levels required to address an entire object with blocks 2151 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 2152 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 2153 * (i.e. we can address the entire object), objects will all use at most 2154 * N-1 levels and the assertion won't overflow. However, once epbs is 2155 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 2156 * enough to address an entire object, so objects will have 5 levels, 2157 * but then this assertion will overflow. 
2158 * 2159 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 2160 * need to redo this logic to handle overflows. 2161 */ 2162 ASSERT(level >= nlevels || 2163 ((nlevels - level - 1) * epbs) + 2164 highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2165 if (level >= nlevels || 2166 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 2167 ((nlevels - level - 1) * epbs)) || 2168 (fail_sparse && 2169 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2170 /* the buffer has no parent yet */ 2171 return (SET_ERROR(ENOENT)); 2172 } else if (level < nlevels-1) { 2173 /* this block is referenced from an indirect block */ 2174 int err = dbuf_hold_impl(dn, level+1, 2175 blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2176 if (err) 2177 return (err); 2178 err = dbuf_read(*parentp, NULL, 2179 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2180 if (err) { 2181 dbuf_rele(*parentp, NULL); 2182 *parentp = NULL; 2183 return (err); 2184 } 2185 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2186 (blkid & ((1ULL << epbs) - 1)); 2187 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 2188 ASSERT(BP_IS_HOLE(*bpp)); 2189 return (0); 2190 } else { 2191 /* the block is referenced from the dnode */ 2192 ASSERT3U(level, ==, nlevels-1); 2193 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2194 blkid < dn->dn_phys->dn_nblkptr); 2195 if (dn->dn_dbuf) { 2196 dbuf_add_ref(dn->dn_dbuf, NULL); 2197 *parentp = dn->dn_dbuf; 2198 } 2199 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2200 return (0); 2201 } 2202 } 2203 2204 static dmu_buf_impl_t * 2205 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2206 dmu_buf_impl_t *parent, blkptr_t *blkptr) 2207 { 2208 objset_t *os = dn->dn_objset; 2209 dmu_buf_impl_t *db, *odb; 2210 2211 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2212 ASSERT(dn->dn_type != DMU_OT_NONE); 2213 2214 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2215 2216 db->db_objset = os; 2217 db->db.db_object = dn->dn_object; 2218 db->db_level = level; 2219 db->db_blkid = blkid; 2220 db->db_last_dirty = NULL; 2221 db->db_dirtycnt = 0; 2222 db->db_dnode_handle = dn->dn_handle; 2223 db->db_parent = parent; 2224 db->db_blkptr = blkptr; 2225 2226 db->db_user = NULL; 2227 db->db_user_immediate_evict = FALSE; 2228 db->db_freed_in_flight = FALSE; 2229 db->db_pending_evict = FALSE; 2230 2231 if (blkid == DMU_BONUS_BLKID) { 2232 ASSERT3P(parent, ==, dn->dn_dbuf); 2233 db->db.db_size = DN_MAX_BONUSLEN - 2234 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 2235 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 2236 db->db.db_offset = DMU_BONUS_BLKID; 2237 db->db_state = DB_UNCACHED; 2238 /* the bonus dbuf is not placed in the hash table */ 2239 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2240 return (db); 2241 } else if (blkid == DMU_SPILL_BLKID) { 2242 db->db.db_size = (blkptr != NULL) ? 2243 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 2244 db->db.db_offset = 0; 2245 } else { 2246 int blocksize = 2247 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2248 db->db.db_size = blocksize; 2249 db->db.db_offset = db->db_blkid * blocksize; 2250 } 2251 2252 /* 2253 * Hold the dn_dbufs_mtx while we get the new dbuf 2254 * in the hash table *and* added to the dbufs list. 2255 * This prevents a possible deadlock with someone 2256 * trying to look up this dbuf before its added to the 2257 * dn_dbufs list. 
2258 */ 2259 mutex_enter(&dn->dn_dbufs_mtx); 2260 db->db_state = DB_EVICTING; 2261 if ((odb = dbuf_hash_insert(db)) != NULL) { 2262 /* someone else inserted it first */ 2263 kmem_cache_free(dbuf_kmem_cache, db); 2264 mutex_exit(&dn->dn_dbufs_mtx); 2265 return (odb); 2266 } 2267 avl_add(&dn->dn_dbufs, db); 2268 2269 db->db_state = DB_UNCACHED; 2270 mutex_exit(&dn->dn_dbufs_mtx); 2271 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2272 2273 if (parent && parent != dn->dn_dbuf) 2274 dbuf_add_ref(parent, db); 2275 2276 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2277 refcount_count(&dn->dn_holds) > 0); 2278 (void) refcount_add(&dn->dn_holds, db); 2279 atomic_inc_32(&dn->dn_dbufs_count); 2280 2281 dprintf_dbuf(db, "db=%p\n", db); 2282 2283 return (db); 2284 } 2285 2286 typedef struct dbuf_prefetch_arg { 2287 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2288 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2289 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2290 int dpa_curlevel; /* The current level that we're reading */ 2291 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2292 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2293 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2294 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2295 } dbuf_prefetch_arg_t; 2296 2297 /* 2298 * Actually issue the prefetch read for the block given. 2299 */ 2300 static void 2301 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2302 { 2303 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2304 return; 2305 2306 arc_flags_t aflags = 2307 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2308 2309 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2310 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2311 ASSERT(dpa->dpa_zio != NULL); 2312 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2313 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2314 &aflags, &dpa->dpa_zb); 2315 } 2316 2317 /* 2318 * Called when an indirect block above our prefetch target is read in. This 2319 * will either read in the next indirect block down the tree or issue the actual 2320 * prefetch if the next block down is our target. 2321 */ 2322 static void 2323 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2324 { 2325 dbuf_prefetch_arg_t *dpa = private; 2326 2327 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2328 ASSERT3S(dpa->dpa_curlevel, >, 0); 2329 2330 /* 2331 * The dpa_dnode is only valid if we are called with a NULL 2332 * zio. This indicates that the arc_read() returned without 2333 * first calling zio_read() to issue a physical read. Once 2334 * a physical read is made the dpa_dnode must be invalidated 2335 * as the locks guarding it may have been dropped. If the 2336 * dpa_dnode is still valid, then we want to add it to the dbuf 2337 * cache. To do so, we must hold the dbuf associated with the block 2338 * we just prefetched, read its contents so that we associate it 2339 * with an arc_buf_t, and then release it. 
2340 */ 2341 if (zio != NULL) { 2342 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2343 if (zio->io_flags & ZIO_FLAG_RAW) { 2344 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2345 } else { 2346 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2347 } 2348 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2349 2350 dpa->dpa_dnode = NULL; 2351 } else if (dpa->dpa_dnode != NULL) { 2352 uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2353 (dpa->dpa_epbs * (dpa->dpa_curlevel - 2354 dpa->dpa_zb.zb_level)); 2355 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2356 dpa->dpa_curlevel, curblkid, FTAG); 2357 (void) dbuf_read(db, NULL, 2358 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2359 dbuf_rele(db, FTAG); 2360 } 2361 2362 dpa->dpa_curlevel--; 2363 2364 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2365 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2366 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2367 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2368 if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { 2369 kmem_free(dpa, sizeof (*dpa)); 2370 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2371 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2372 dbuf_issue_final_prefetch(dpa, bp); 2373 kmem_free(dpa, sizeof (*dpa)); 2374 } else { 2375 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2376 zbookmark_phys_t zb; 2377 2378 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2379 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 2380 iter_aflags |= ARC_FLAG_L2CACHE; 2381 2382 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2383 2384 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2385 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2386 2387 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2388 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2389 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2390 &iter_aflags, &zb); 2391 } 2392 2393 arc_buf_destroy(abuf, private); 2394 } 2395 2396 /* 2397 * Issue prefetch reads for the given block on the given level. If the indirect 2398 * blocks above that block are not in memory, we will read them in 2399 * asynchronously. As a result, this call never blocks waiting for a read to 2400 * complete. 2401 */ 2402 void 2403 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2404 arc_flags_t aflags) 2405 { 2406 blkptr_t bp; 2407 int epbs, nlevels, curlevel; 2408 uint64_t curblkid; 2409 2410 ASSERT(blkid != DMU_BONUS_BLKID); 2411 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2412 2413 if (blkid > dn->dn_maxblkid) 2414 return; 2415 2416 if (dnode_block_freed(dn, blkid)) 2417 return; 2418 2419 /* 2420 * This dnode hasn't been written to disk yet, so there's nothing to 2421 * prefetch. 2422 */ 2423 nlevels = dn->dn_phys->dn_nlevels; 2424 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2425 return; 2426 2427 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2428 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2429 return; 2430 2431 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2432 level, blkid); 2433 if (db != NULL) { 2434 mutex_exit(&db->db_mtx); 2435 /* 2436 * This dbuf already exists. It is either CACHED, or 2437 * (we assume) about to be read or filled. 2438 */ 2439 return; 2440 } 2441 2442 /* 2443 * Find the closest ancestor (indirect block) of the target block 2444 * that is present in the cache. In this indirect block, we will 2445 * find the bp that is at curlevel, curblkid. 
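 * For example, if epbs were 10 (1024 block pointers per indirect block),
 * the level-1 ancestor of level-0 blkid 1234567 would be blkid
 * 1234567 >> 10 = 1205, and its level-2 ancestor would be blkid
 * 1205 >> 10 = 1.  (Illustrative numbers only.)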
2446 */ 2447 curlevel = level; 2448 curblkid = blkid; 2449 while (curlevel < nlevels - 1) { 2450 int parent_level = curlevel + 1; 2451 uint64_t parent_blkid = curblkid >> epbs; 2452 dmu_buf_impl_t *db; 2453 2454 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2455 FALSE, TRUE, FTAG, &db) == 0) { 2456 blkptr_t *bpp = db->db_buf->b_data; 2457 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2458 dbuf_rele(db, FTAG); 2459 break; 2460 } 2461 2462 curlevel = parent_level; 2463 curblkid = parent_blkid; 2464 } 2465 2466 if (curlevel == nlevels - 1) { 2467 /* No cached indirect blocks found. */ 2468 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2469 bp = dn->dn_phys->dn_blkptr[curblkid]; 2470 } 2471 if (BP_IS_HOLE(&bp)) 2472 return; 2473 2474 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2475 2476 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2477 ZIO_FLAG_CANFAIL); 2478 2479 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2480 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2481 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2482 dn->dn_object, level, blkid); 2483 dpa->dpa_curlevel = curlevel; 2484 dpa->dpa_prio = prio; 2485 dpa->dpa_aflags = aflags; 2486 dpa->dpa_spa = dn->dn_objset->os_spa; 2487 dpa->dpa_dnode = dn; 2488 dpa->dpa_epbs = epbs; 2489 dpa->dpa_zio = pio; 2490 2491 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2492 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2493 dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 2494 2495 /* 2496 * If we have the indirect just above us, no need to do the asynchronous 2497 * prefetch chain; we'll just run the last step ourselves. If we're at 2498 * a higher level, though, we want to issue the prefetches for all the 2499 * indirect blocks asynchronously, so we can go on with whatever we were 2500 * doing. 2501 */ 2502 if (curlevel == level) { 2503 ASSERT3U(curblkid, ==, blkid); 2504 dbuf_issue_final_prefetch(dpa, &bp); 2505 kmem_free(dpa, sizeof (*dpa)); 2506 } else { 2507 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2508 zbookmark_phys_t zb; 2509 2510 /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 2511 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 2512 iter_aflags |= ARC_FLAG_L2CACHE; 2513 2514 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2515 dn->dn_object, curlevel, curblkid); 2516 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2517 &bp, dbuf_prefetch_indirect_done, dpa, prio, 2518 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2519 &iter_aflags, &zb); 2520 } 2521 /* 2522 * We use pio here instead of dpa_zio since it's possible that 2523 * dpa may have already been freed. 2524 */ 2525 zio_nowait(pio); 2526 } 2527 2528 /* 2529 * Returns with db_holds incremented, and db_mtx not held. 2530 * Note: dn_struct_rwlock must be held. 
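 *
 * A minimal caller sketch for a level-0 block (roughly what
 * dbuf_hold()/dbuf_hold_level() below reduce to; error handling elided):
 *
 *	dmu_buf_impl_t *db;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	error = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (error == 0) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}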
2531 */ 2532 int 2533 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2534 boolean_t fail_sparse, boolean_t fail_uncached, 2535 void *tag, dmu_buf_impl_t **dbp) 2536 { 2537 dmu_buf_impl_t *db, *parent = NULL; 2538 2539 ASSERT(blkid != DMU_BONUS_BLKID); 2540 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2541 ASSERT3U(dn->dn_nlevels, >, level); 2542 2543 *dbp = NULL; 2544 top: 2545 /* dbuf_find() returns with db_mtx held */ 2546 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2547 2548 if (db == NULL) { 2549 blkptr_t *bp = NULL; 2550 int err; 2551 2552 if (fail_uncached) 2553 return (SET_ERROR(ENOENT)); 2554 2555 ASSERT3P(parent, ==, NULL); 2556 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2557 if (fail_sparse) { 2558 if (err == 0 && bp && BP_IS_HOLE(bp)) 2559 err = SET_ERROR(ENOENT); 2560 if (err) { 2561 if (parent) 2562 dbuf_rele(parent, NULL); 2563 return (err); 2564 } 2565 } 2566 if (err && err != ENOENT) 2567 return (err); 2568 db = dbuf_create(dn, level, blkid, parent, bp); 2569 } 2570 2571 if (fail_uncached && db->db_state != DB_CACHED) { 2572 mutex_exit(&db->db_mtx); 2573 return (SET_ERROR(ENOENT)); 2574 } 2575 2576 if (db->db_buf != NULL) 2577 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2578 2579 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2580 2581 /* 2582 * If this buffer is currently syncing out, and we are 2583 * still referencing it from db_data, we need to make a copy 2584 * of it in case we decide we want to dirty it again in this txg. 2585 */ 2586 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2587 dn->dn_object != DMU_META_DNODE_OBJECT && 2588 db->db_state == DB_CACHED && db->db_data_pending) { 2589 dbuf_dirty_record_t *dr = db->db_data_pending; 2590 2591 if (dr->dt.dl.dr_data == db->db_buf) { 2592 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2593 2594 dbuf_set_data(db, 2595 arc_alloc_buf(dn->dn_objset->os_spa, db, type, 2596 db->db.db_size)); 2597 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2598 db->db.db_size); 2599 } 2600 } 2601 2602 if (multilist_link_active(&db->db_cache_link)) { 2603 ASSERT(refcount_is_zero(&db->db_holds)); 2604 multilist_remove(dbuf_cache, db); 2605 (void) refcount_remove_many(&dbuf_cache_size, 2606 db->db.db_size, db); 2607 } 2608 (void) refcount_add(&db->db_holds, tag); 2609 DBUF_VERIFY(db); 2610 mutex_exit(&db->db_mtx); 2611 2612 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2613 if (parent) 2614 dbuf_rele(parent, NULL); 2615 2616 ASSERT3P(DB_DNODE(db), ==, dn); 2617 ASSERT3U(db->db_blkid, ==, blkid); 2618 ASSERT3U(db->db_level, ==, level); 2619 *dbp = db; 2620 2621 return (0); 2622 } 2623 2624 dmu_buf_impl_t * 2625 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2626 { 2627 return (dbuf_hold_level(dn, 0, blkid, tag)); 2628 } 2629 2630 dmu_buf_impl_t * 2631 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2632 { 2633 dmu_buf_impl_t *db; 2634 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2635 return (err ?
NULL : db); 2636 } 2637 2638 void 2639 dbuf_create_bonus(dnode_t *dn) 2640 { 2641 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2642 2643 ASSERT(dn->dn_bonus == NULL); 2644 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 2645 } 2646 2647 int 2648 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 2649 { 2650 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2651 dnode_t *dn; 2652 2653 if (db->db_blkid != DMU_SPILL_BLKID) 2654 return (SET_ERROR(ENOTSUP)); 2655 if (blksz == 0) 2656 blksz = SPA_MINBLOCKSIZE; 2657 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2658 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 2659 2660 DB_DNODE_ENTER(db); 2661 dn = DB_DNODE(db); 2662 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2663 dbuf_new_size(db, blksz, tx); 2664 rw_exit(&dn->dn_struct_rwlock); 2665 DB_DNODE_EXIT(db); 2666 2667 return (0); 2668 } 2669 2670 void 2671 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 2672 { 2673 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2674 } 2675 2676 #pragma weak dmu_buf_add_ref = dbuf_add_ref 2677 void 2678 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2679 { 2680 int64_t holds = refcount_add(&db->db_holds, tag); 2681 ASSERT3S(holds, >, 1); 2682 } 2683 2684 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2685 boolean_t 2686 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2687 void *tag) 2688 { 2689 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2690 dmu_buf_impl_t *found_db; 2691 boolean_t result = B_FALSE; 2692 2693 if (db->db_blkid == DMU_BONUS_BLKID) 2694 found_db = dbuf_find_bonus(os, obj); 2695 else 2696 found_db = dbuf_find(os, obj, 0, blkid); 2697 2698 if (found_db != NULL) { 2699 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2700 (void) refcount_add(&db->db_holds, tag); 2701 result = B_TRUE; 2702 } 2703 mutex_exit(&db->db_mtx); 2704 } 2705 return (result); 2706 } 2707 2708 /* 2709 * If you call dbuf_rele() you had better not be referencing the dnode handle 2710 * unless you have some other direct or indirect hold on the dnode. (An indirect 2711 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2712 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2713 * dnode's parent dbuf evicting its dnode handles. 2714 */ 2715 void 2716 dbuf_rele(dmu_buf_impl_t *db, void *tag) 2717 { 2718 mutex_enter(&db->db_mtx); 2719 dbuf_rele_and_unlock(db, tag); 2720 } 2721 2722 void 2723 dmu_buf_rele(dmu_buf_t *db, void *tag) 2724 { 2725 dbuf_rele((dmu_buf_impl_t *)db, tag); 2726 } 2727 2728 /* 2729 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2730 * db_dirtycnt and db_holds to be updated atomically. 2731 */ 2732 void 2733 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2734 { 2735 int64_t holds; 2736 2737 ASSERT(MUTEX_HELD(&db->db_mtx)); 2738 DBUF_VERIFY(db); 2739 2740 /* 2741 * Remove the reference to the dbuf before removing its hold on the 2742 * dnode so we can guarantee in dnode_move() that a referenced bonus 2743 * buffer has a corresponding dnode hold. 2744 */ 2745 holds = refcount_remove(&db->db_holds, tag); 2746 ASSERT(holds >= 0); 2747 2748 /* 2749 * We can't freeze indirects if there is a possibility that they 2750 * may be modified in the current syncing context. 2751 */ 2752 if (db->db_buf != NULL && 2753 holds == (db->db_level == 0 ? 
db->db_dirtycnt : 0)) { 2754 arc_buf_freeze(db->db_buf); 2755 } 2756 2757 if (holds == db->db_dirtycnt && 2758 db->db_level == 0 && db->db_user_immediate_evict) 2759 dbuf_evict_user(db); 2760 2761 if (holds == 0) { 2762 if (db->db_blkid == DMU_BONUS_BLKID) { 2763 dnode_t *dn; 2764 boolean_t evict_dbuf = db->db_pending_evict; 2765 2766 /* 2767 * If the dnode moves here, we cannot cross this 2768 * barrier until the move completes. 2769 */ 2770 DB_DNODE_ENTER(db); 2771 2772 dn = DB_DNODE(db); 2773 atomic_dec_32(&dn->dn_dbufs_count); 2774 2775 /* 2776 * Decrementing the dbuf count means that the bonus 2777 * buffer's dnode hold is no longer discounted in 2778 * dnode_move(). The dnode cannot move until after 2779 * the dnode_rele() below. 2780 */ 2781 DB_DNODE_EXIT(db); 2782 2783 /* 2784 * Do not reference db after its lock is dropped. 2785 * Another thread may evict it. 2786 */ 2787 mutex_exit(&db->db_mtx); 2788 2789 if (evict_dbuf) 2790 dnode_evict_bonus(dn); 2791 2792 dnode_rele(dn, db); 2793 } else if (db->db_buf == NULL) { 2794 /* 2795 * This is a special case: we never associated this 2796 * dbuf with any data allocated from the ARC. 2797 */ 2798 ASSERT(db->db_state == DB_UNCACHED || 2799 db->db_state == DB_NOFILL); 2800 dbuf_destroy(db); 2801 } else if (arc_released(db->db_buf)) { 2802 /* 2803 * This dbuf has anonymous data associated with it. 2804 */ 2805 dbuf_destroy(db); 2806 } else { 2807 boolean_t do_arc_evict = B_FALSE; 2808 blkptr_t bp; 2809 spa_t *spa = dmu_objset_spa(db->db_objset); 2810 2811 if (!DBUF_IS_CACHEABLE(db) && 2812 db->db_blkptr != NULL && 2813 !BP_IS_HOLE(db->db_blkptr) && 2814 !BP_IS_EMBEDDED(db->db_blkptr)) { 2815 do_arc_evict = B_TRUE; 2816 bp = *db->db_blkptr; 2817 } 2818 2819 if (!DBUF_IS_CACHEABLE(db) || 2820 db->db_pending_evict) { 2821 dbuf_destroy(db); 2822 } else if (!multilist_link_active(&db->db_cache_link)) { 2823 multilist_insert(dbuf_cache, db); 2824 (void) refcount_add_many(&dbuf_cache_size, 2825 db->db.db_size, db); 2826 mutex_exit(&db->db_mtx); 2827 2828 dbuf_evict_notify(); 2829 } 2830 2831 if (do_arc_evict) 2832 arc_freed(spa, &bp); 2833 } 2834 } else { 2835 mutex_exit(&db->db_mtx); 2836 } 2837 2838 } 2839 2840 #pragma weak dmu_buf_refcount = dbuf_refcount 2841 uint64_t 2842 dbuf_refcount(dmu_buf_impl_t *db) 2843 { 2844 return (refcount_count(&db->db_holds)); 2845 } 2846 2847 void * 2848 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2849 dmu_buf_user_t *new_user) 2850 { 2851 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2852 2853 mutex_enter(&db->db_mtx); 2854 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2855 if (db->db_user == old_user) 2856 db->db_user = new_user; 2857 else 2858 old_user = db->db_user; 2859 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2860 mutex_exit(&db->db_mtx); 2861 2862 return (old_user); 2863 } 2864 2865 void * 2866 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2867 { 2868 return (dmu_buf_replace_user(db_fake, NULL, user)); 2869 } 2870 2871 void * 2872 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2873 { 2874 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2875 2876 db->db_user_immediate_evict = TRUE; 2877 return (dmu_buf_set_user(db_fake, user)); 2878 } 2879 2880 void * 2881 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2882 { 2883 return (dmu_buf_replace_user(db_fake, user, NULL)); 2884 } 2885 2886 void * 2887 dmu_buf_get_user(dmu_buf_t *db_fake) 2888 { 2889 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2890 2891 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2892 
return (db->db_user); 2893 } 2894 2895 void 2896 dmu_buf_user_evict_wait() 2897 { 2898 taskq_wait(dbu_evict_taskq); 2899 } 2900 2901 blkptr_t * 2902 dmu_buf_get_blkptr(dmu_buf_t *db) 2903 { 2904 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2905 return (dbi->db_blkptr); 2906 } 2907 2908 objset_t * 2909 dmu_buf_get_objset(dmu_buf_t *db) 2910 { 2911 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2912 return (dbi->db_objset); 2913 } 2914 2915 dnode_t * 2916 dmu_buf_dnode_enter(dmu_buf_t *db) 2917 { 2918 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2919 DB_DNODE_ENTER(dbi); 2920 return (DB_DNODE(dbi)); 2921 } 2922 2923 void 2924 dmu_buf_dnode_exit(dmu_buf_t *db) 2925 { 2926 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2927 DB_DNODE_EXIT(dbi); 2928 } 2929 2930 static void 2931 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 2932 { 2933 /* ASSERT(dmu_tx_is_syncing(tx) */ 2934 ASSERT(MUTEX_HELD(&db->db_mtx)); 2935 2936 if (db->db_blkptr != NULL) 2937 return; 2938 2939 if (db->db_blkid == DMU_SPILL_BLKID) { 2940 db->db_blkptr = &dn->dn_phys->dn_spill; 2941 BP_ZERO(db->db_blkptr); 2942 return; 2943 } 2944 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 2945 /* 2946 * This buffer was allocated at a time when there was 2947 * no available blkptrs from the dnode, or it was 2948 * inappropriate to hook it in (i.e., nlevels mis-match). 2949 */ 2950 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 2951 ASSERT(db->db_parent == NULL); 2952 db->db_parent = dn->dn_dbuf; 2953 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 2954 DBUF_VERIFY(db); 2955 } else { 2956 dmu_buf_impl_t *parent = db->db_parent; 2957 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2958 2959 ASSERT(dn->dn_phys->dn_nlevels > 1); 2960 if (parent == NULL) { 2961 mutex_exit(&db->db_mtx); 2962 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2963 parent = dbuf_hold_level(dn, db->db_level + 1, 2964 db->db_blkid >> epbs, db); 2965 rw_exit(&dn->dn_struct_rwlock); 2966 mutex_enter(&db->db_mtx); 2967 db->db_parent = parent; 2968 } 2969 db->db_blkptr = (blkptr_t *)parent->db.db_data + 2970 (db->db_blkid & ((1ULL << epbs) - 1)); 2971 DBUF_VERIFY(db); 2972 } 2973 } 2974 2975 static void 2976 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2977 { 2978 dmu_buf_impl_t *db = dr->dr_dbuf; 2979 dnode_t *dn; 2980 zio_t *zio; 2981 2982 ASSERT(dmu_tx_is_syncing(tx)); 2983 2984 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2985 2986 mutex_enter(&db->db_mtx); 2987 2988 ASSERT(db->db_level > 0); 2989 DBUF_VERIFY(db); 2990 2991 /* Read the block if it hasn't been read yet. */ 2992 if (db->db_buf == NULL) { 2993 mutex_exit(&db->db_mtx); 2994 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 2995 mutex_enter(&db->db_mtx); 2996 } 2997 ASSERT3U(db->db_state, ==, DB_CACHED); 2998 ASSERT(db->db_buf != NULL); 2999 3000 DB_DNODE_ENTER(db); 3001 dn = DB_DNODE(db); 3002 /* Indirect block size must match what the dnode thinks it is. 
*/ 3003 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3004 dbuf_check_blkptr(dn, db); 3005 DB_DNODE_EXIT(db); 3006 3007 /* Provide the pending dirty record to child dbufs */ 3008 db->db_data_pending = dr; 3009 3010 mutex_exit(&db->db_mtx); 3011 3012 dbuf_write(dr, db->db_buf, tx); 3013 3014 zio = dr->dr_zio; 3015 mutex_enter(&dr->dt.di.dr_mtx); 3016 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3017 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3018 mutex_exit(&dr->dt.di.dr_mtx); 3019 zio_nowait(zio); 3020 } 3021 3022 static void 3023 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3024 { 3025 arc_buf_t **datap = &dr->dt.dl.dr_data; 3026 dmu_buf_impl_t *db = dr->dr_dbuf; 3027 dnode_t *dn; 3028 objset_t *os; 3029 uint64_t txg = tx->tx_txg; 3030 3031 ASSERT(dmu_tx_is_syncing(tx)); 3032 3033 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3034 3035 mutex_enter(&db->db_mtx); 3036 /* 3037 * To be synced, we must be dirtied. But we 3038 * might have been freed after the dirty. 3039 */ 3040 if (db->db_state == DB_UNCACHED) { 3041 /* This buffer has been freed since it was dirtied */ 3042 ASSERT(db->db.db_data == NULL); 3043 } else if (db->db_state == DB_FILL) { 3044 /* This buffer was freed and is now being re-filled */ 3045 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3046 } else { 3047 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3048 } 3049 DBUF_VERIFY(db); 3050 3051 DB_DNODE_ENTER(db); 3052 dn = DB_DNODE(db); 3053 3054 if (db->db_blkid == DMU_SPILL_BLKID) { 3055 mutex_enter(&dn->dn_mtx); 3056 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 3057 mutex_exit(&dn->dn_mtx); 3058 } 3059 3060 /* 3061 * If this is a bonus buffer, simply copy the bonus data into the 3062 * dnode. It will be written out when the dnode is synced (and it 3063 * will be synced, since it must have been dirty for dbuf_sync to 3064 * be called). 3065 */ 3066 if (db->db_blkid == DMU_BONUS_BLKID) { 3067 dbuf_dirty_record_t **drp; 3068 3069 ASSERT(*datap != NULL); 3070 ASSERT0(db->db_level); 3071 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); 3072 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); 3073 DB_DNODE_EXIT(db); 3074 3075 if (*datap != db->db.db_data) { 3076 zio_buf_free(*datap, DN_MAX_BONUSLEN); 3077 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 3078 } 3079 db->db_data_pending = NULL; 3080 drp = &db->db_last_dirty; 3081 while (*drp != dr) 3082 drp = &(*drp)->dr_next; 3083 ASSERT(dr->dr_next == NULL); 3084 ASSERT(dr->dr_dbuf == db); 3085 *drp = dr->dr_next; 3086 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3087 ASSERT(db->db_dirtycnt > 0); 3088 db->db_dirtycnt -= 1; 3089 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); 3090 return; 3091 } 3092 3093 os = dn->dn_objset; 3094 3095 /* 3096 * This function may have dropped the db_mtx lock allowing a dmu_sync 3097 * operation to sneak in. As a result, we need to ensure that we 3098 * don't check the dr_override_state until we have returned from 3099 * dbuf_check_blkptr. 3100 */ 3101 dbuf_check_blkptr(dn, db); 3102 3103 /* 3104 * If this buffer is in the middle of an immediate write, 3105 * wait for the synchronous IO to complete. 
3106 */ 3107 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3108 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3109 cv_wait(&db->db_changed, &db->db_mtx); 3110 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3111 } 3112 3113 if (db->db_state != DB_NOFILL && 3114 dn->dn_object != DMU_META_DNODE_OBJECT && 3115 refcount_count(&db->db_holds) > 1 && 3116 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3117 *datap == db->db_buf) { 3118 /* 3119 * If this buffer is currently "in use" (i.e., there 3120 * are active holds and db_data still references it), 3121 * then make a copy before we start the write so that 3122 * any modifications from the open txg will not leak 3123 * into this write. 3124 * 3125 * NOTE: this copy does not need to be made for 3126 * objects only modified in the syncing context (e.g. 3127 * DNONE_DNODE blocks). 3128 */ 3129 int psize = arc_buf_size(*datap); 3130 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 3131 enum zio_compress compress_type = arc_get_compression(*datap); 3132 3133 if (compress_type == ZIO_COMPRESS_OFF) { 3134 *datap = arc_alloc_buf(os->os_spa, db, type, psize); 3135 } else { 3136 ASSERT3U(type, ==, ARC_BUFC_DATA); 3137 int lsize = arc_buf_lsize(*datap); 3138 *datap = arc_alloc_compressed_buf(os->os_spa, db, 3139 psize, lsize, compress_type); 3140 } 3141 bcopy(db->db.db_data, (*datap)->b_data, psize); 3142 } 3143 db->db_data_pending = dr; 3144 3145 mutex_exit(&db->db_mtx); 3146 3147 dbuf_write(dr, *datap, tx); 3148 3149 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3150 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3151 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3152 DB_DNODE_EXIT(db); 3153 } else { 3154 /* 3155 * Although zio_nowait() does not "wait for an IO", it does 3156 * initiate the IO. If this is an empty write it seems plausible 3157 * that the IO could actually be completed before the nowait 3158 * returns. We need to DB_DNODE_EXIT() first in case 3159 * zio_nowait() invalidates the dbuf. 3160 */ 3161 DB_DNODE_EXIT(db); 3162 zio_nowait(dr->dr_zio); 3163 } 3164 } 3165 3166 void 3167 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3168 { 3169 dbuf_dirty_record_t *dr; 3170 3171 while (dr = list_head(list)) { 3172 if (dr->dr_zio != NULL) { 3173 /* 3174 * If we find an already initialized zio then we 3175 * are processing the meta-dnode, and we have finished. 3176 * The dbufs for all dnodes are put back on the list 3177 * during processing, so that we can zio_wait() 3178 * these IOs after initiating all child IOs. 
3179 */ 3180 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3181 DMU_META_DNODE_OBJECT); 3182 break; 3183 } 3184 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 3185 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 3186 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 3187 } 3188 list_remove(list, dr); 3189 if (dr->dr_dbuf->db_level > 0) 3190 dbuf_sync_indirect(dr, tx); 3191 else 3192 dbuf_sync_leaf(dr, tx); 3193 } 3194 } 3195 3196 /* ARGSUSED */ 3197 static void 3198 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3199 { 3200 dmu_buf_impl_t *db = vdb; 3201 dnode_t *dn; 3202 blkptr_t *bp = zio->io_bp; 3203 blkptr_t *bp_orig = &zio->io_bp_orig; 3204 spa_t *spa = zio->io_spa; 3205 int64_t delta; 3206 uint64_t fill = 0; 3207 int i; 3208 3209 ASSERT3P(db->db_blkptr, !=, NULL); 3210 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3211 3212 DB_DNODE_ENTER(db); 3213 dn = DB_DNODE(db); 3214 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3215 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3216 zio->io_prev_space_delta = delta; 3217 3218 if (bp->blk_birth != 0) { 3219 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 3220 BP_GET_TYPE(bp) == dn->dn_type) || 3221 (db->db_blkid == DMU_SPILL_BLKID && 3222 BP_GET_TYPE(bp) == dn->dn_bonustype) || 3223 BP_IS_EMBEDDED(bp)); 3224 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 3225 } 3226 3227 mutex_enter(&db->db_mtx); 3228 3229 #ifdef ZFS_DEBUG 3230 if (db->db_blkid == DMU_SPILL_BLKID) { 3231 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3232 ASSERT(!(BP_IS_HOLE(bp)) && 3233 db->db_blkptr == &dn->dn_phys->dn_spill); 3234 } 3235 #endif 3236 3237 if (db->db_level == 0) { 3238 mutex_enter(&dn->dn_mtx); 3239 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 3240 db->db_blkid != DMU_SPILL_BLKID) 3241 dn->dn_phys->dn_maxblkid = db->db_blkid; 3242 mutex_exit(&dn->dn_mtx); 3243 3244 if (dn->dn_type == DMU_OT_DNODE) { 3245 dnode_phys_t *dnp = db->db.db_data; 3246 for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 3247 i--, dnp++) { 3248 if (dnp->dn_type != DMU_OT_NONE) 3249 fill++; 3250 } 3251 } else { 3252 if (BP_IS_HOLE(bp)) { 3253 fill = 0; 3254 } else { 3255 fill = 1; 3256 } 3257 } 3258 } else { 3259 blkptr_t *ibp = db->db.db_data; 3260 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3261 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3262 if (BP_IS_HOLE(ibp)) 3263 continue; 3264 fill += BP_GET_FILL(ibp); 3265 } 3266 } 3267 DB_DNODE_EXIT(db); 3268 3269 if (!BP_IS_EMBEDDED(bp)) 3270 bp->blk_fill = fill; 3271 3272 mutex_exit(&db->db_mtx); 3273 3274 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3275 *db->db_blkptr = *bp; 3276 rw_exit(&dn->dn_struct_rwlock); 3277 } 3278 3279 /* ARGSUSED */ 3280 /* 3281 * This function gets called just prior to running through the compression 3282 * stage of the zio pipeline. If we're an indirect block comprised of only 3283 * holes, then we want this indirect to be compressed away to a hole. In 3284 * order to do that we must zero out any information about the holes that 3285 * this indirect points to before we try to compress it.
3286 */ 3287 static void 3288 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3289 { 3290 dmu_buf_impl_t *db = vdb; 3291 dnode_t *dn; 3292 blkptr_t *bp; 3293 unsigned int epbs, i; 3294 3295 ASSERT3U(db->db_level, >, 0); 3296 DB_DNODE_ENTER(db); 3297 dn = DB_DNODE(db); 3298 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3299 ASSERT3U(epbs, <, 31); 3300 3301 /* Determine if all our children are holes */ 3302 for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 3303 if (!BP_IS_HOLE(bp)) 3304 break; 3305 } 3306 3307 /* 3308 * If all the children are holes, then zero them all out so that 3309 * we may get compressed away. 3310 */ 3311 if (i == 1 << epbs) { 3312 /* 3313 * We only found holes. Grab the rwlock to prevent 3314 * anybody from reading the blocks we're about to 3315 * zero out. 3316 */ 3317 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3318 bzero(db->db.db_data, db->db.db_size); 3319 rw_exit(&dn->dn_struct_rwlock); 3320 } 3321 DB_DNODE_EXIT(db); 3322 } 3323 3324 /* 3325 * The SPA will call this callback several times for each zio - once 3326 * for every physical child i/o (zio->io_phys_children times). This 3327 * allows the DMU to monitor the progress of each logical i/o. For example, 3328 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 3329 * block. There may be a long delay before all copies/fragments are completed, 3330 * so this callback allows us to retire dirty space gradually, as the physical 3331 * i/os complete. 3332 */ 3333 /* ARGSUSED */ 3334 static void 3335 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 3336 { 3337 dmu_buf_impl_t *db = arg; 3338 objset_t *os = db->db_objset; 3339 dsl_pool_t *dp = dmu_objset_pool(os); 3340 dbuf_dirty_record_t *dr; 3341 int delta = 0; 3342 3343 dr = db->db_data_pending; 3344 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 3345 3346 /* 3347 * The callback will be called io_phys_children times. Retire one 3348 * portion of our dirty space each time we are called. Any rounding 3349 * error will be cleaned up by dsl_pool_sync()'s call to 3350 * dsl_pool_undirty_space(). 3351 */ 3352 delta = dr->dr_accounted / zio->io_phys_children; 3353 dsl_pool_undirty_space(dp, delta, zio->io_txg); 3354 } 3355 3356 /* ARGSUSED */ 3357 static void 3358 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3359 { 3360 dmu_buf_impl_t *db = vdb; 3361 blkptr_t *bp_orig = &zio->io_bp_orig; 3362 blkptr_t *bp = db->db_blkptr; 3363 objset_t *os = db->db_objset; 3364 dmu_tx_t *tx = os->os_synctx; 3365 dbuf_dirty_record_t **drp, *dr; 3366 3367 ASSERT0(zio->io_error); 3368 ASSERT(db->db_blkptr == bp); 3369 3370 /* 3371 * For nopwrites and rewrites we ensure that the bp matches our 3372 * original and bypass all the accounting. 
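 * Otherwise the previous block pointer is killed against the dataset and
 * the new one is born, which is where the dataset's used/referenced space
 * accounting gets updated.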
3373 */ 3374 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3375 ASSERT(BP_EQUAL(bp, bp_orig)); 3376 } else { 3377 dsl_dataset_t *ds = os->os_dsl_dataset; 3378 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3379 dsl_dataset_block_born(ds, bp, tx); 3380 } 3381 3382 mutex_enter(&db->db_mtx); 3383 3384 DBUF_VERIFY(db); 3385 3386 drp = &db->db_last_dirty; 3387 while ((dr = *drp) != db->db_data_pending) 3388 drp = &dr->dr_next; 3389 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3390 ASSERT(dr->dr_dbuf == db); 3391 ASSERT(dr->dr_next == NULL); 3392 *drp = dr->dr_next; 3393 3394 #ifdef ZFS_DEBUG 3395 if (db->db_blkid == DMU_SPILL_BLKID) { 3396 dnode_t *dn; 3397 3398 DB_DNODE_ENTER(db); 3399 dn = DB_DNODE(db); 3400 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3401 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 3402 db->db_blkptr == &dn->dn_phys->dn_spill); 3403 DB_DNODE_EXIT(db); 3404 } 3405 #endif 3406 3407 if (db->db_level == 0) { 3408 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3409 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 3410 if (db->db_state != DB_NOFILL) { 3411 if (dr->dt.dl.dr_data != db->db_buf) 3412 arc_buf_destroy(dr->dt.dl.dr_data, db); 3413 } 3414 } else { 3415 dnode_t *dn; 3416 3417 DB_DNODE_ENTER(db); 3418 dn = DB_DNODE(db); 3419 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3420 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3421 if (!BP_IS_HOLE(db->db_blkptr)) { 3422 int epbs = 3423 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3424 ASSERT3U(db->db_blkid, <=, 3425 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3426 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3427 db->db.db_size); 3428 } 3429 DB_DNODE_EXIT(db); 3430 mutex_destroy(&dr->dt.di.dr_mtx); 3431 list_destroy(&dr->dt.di.dr_children); 3432 } 3433 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3434 3435 cv_broadcast(&db->db_changed); 3436 ASSERT(db->db_dirtycnt > 0); 3437 db->db_dirtycnt -= 1; 3438 db->db_data_pending = NULL; 3439 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); 3440 } 3441 3442 static void 3443 dbuf_write_nofill_ready(zio_t *zio) 3444 { 3445 dbuf_write_ready(zio, NULL, zio->io_private); 3446 } 3447 3448 static void 3449 dbuf_write_nofill_done(zio_t *zio) 3450 { 3451 dbuf_write_done(zio, NULL, zio->io_private); 3452 } 3453 3454 static void 3455 dbuf_write_override_ready(zio_t *zio) 3456 { 3457 dbuf_dirty_record_t *dr = zio->io_private; 3458 dmu_buf_impl_t *db = dr->dr_dbuf; 3459 3460 dbuf_write_ready(zio, NULL, db); 3461 } 3462 3463 static void 3464 dbuf_write_override_done(zio_t *zio) 3465 { 3466 dbuf_dirty_record_t *dr = zio->io_private; 3467 dmu_buf_impl_t *db = dr->dr_dbuf; 3468 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3469 3470 mutex_enter(&db->db_mtx); 3471 if (!BP_EQUAL(zio->io_bp, obp)) { 3472 if (!BP_IS_HOLE(obp)) 3473 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3474 arc_release(dr->dt.dl.dr_data, db); 3475 } 3476 mutex_exit(&db->db_mtx); 3477 dbuf_write_done(zio, NULL, db); 3478 3479 if (zio->io_abd != NULL) 3480 abd_put(zio->io_abd); 3481 } 3482 3483 typedef struct dbuf_remap_impl_callback_arg { 3484 objset_t *drica_os; 3485 uint64_t drica_blk_birth; 3486 dmu_tx_t *drica_tx; 3487 } dbuf_remap_impl_callback_arg_t; 3488 3489 static void 3490 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 3491 void *arg) 3492 { 3493 dbuf_remap_impl_callback_arg_t *drica = arg; 3494 objset_t *os = drica->drica_os; 3495 spa_t *spa = dmu_objset_spa(os); 3496 dmu_tx_t *tx = drica->drica_tx; 3497 3498 
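	/*
	 * Called back from spa_remap_blkptr() with the original location of
	 * a block that dbuf_remap_impl() is rewriting.  For the MOS we mark
	 * that range obsolete on the indirect vdev directly; for other
	 * objsets the dataset records the remapped block (along with its
	 * birth txg) and handles the accounting itself.
	 */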
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3499 3500 if (os == spa_meta_objset(spa)) { 3501 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 3502 } else { 3503 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 3504 size, drica->drica_blk_birth, tx); 3505 } 3506 } 3507 3508 static void 3509 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx) 3510 { 3511 blkptr_t bp_copy = *bp; 3512 spa_t *spa = dmu_objset_spa(dn->dn_objset); 3513 dbuf_remap_impl_callback_arg_t drica; 3514 3515 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3516 3517 drica.drica_os = dn->dn_objset; 3518 drica.drica_blk_birth = bp->blk_birth; 3519 drica.drica_tx = tx; 3520 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 3521 &drica)) { 3522 /* 3523 * The struct_rwlock prevents dbuf_read_impl() from 3524 * dereferencing the BP while we are changing it. To 3525 * avoid lock contention, only grab it when we are actually 3526 * changing the BP. 3527 */ 3528 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 3529 *bp = bp_copy; 3530 rw_exit(&dn->dn_struct_rwlock); 3531 } 3532 } 3533 3534 /* 3535 * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 3536 * to remap a copy of every bp in the dbuf. 3537 */ 3538 boolean_t 3539 dbuf_can_remap(const dmu_buf_impl_t *db) 3540 { 3541 spa_t *spa = dmu_objset_spa(db->db_objset); 3542 blkptr_t *bp = db->db.db_data; 3543 boolean_t ret = B_FALSE; 3544 3545 ASSERT3U(db->db_level, >, 0); 3546 ASSERT3S(db->db_state, ==, DB_CACHED); 3547 3548 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3549 3550 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3551 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3552 blkptr_t bp_copy = bp[i]; 3553 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3554 ret = B_TRUE; 3555 break; 3556 } 3557 } 3558 spa_config_exit(spa, SCL_VDEV, FTAG); 3559 3560 return (ret); 3561 } 3562 3563 boolean_t 3564 dnode_needs_remap(const dnode_t *dn) 3565 { 3566 spa_t *spa = dmu_objset_spa(dn->dn_objset); 3567 boolean_t ret = B_FALSE; 3568 3569 if (dn->dn_phys->dn_nlevels == 0) { 3570 return (B_FALSE); 3571 } 3572 3573 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 3574 3575 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3576 for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 3577 blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 3578 if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 3579 ret = B_TRUE; 3580 break; 3581 } 3582 } 3583 spa_config_exit(spa, SCL_VDEV, FTAG); 3584 3585 return (ret); 3586 } 3587 3588 /* 3589 * Remap any existing BP's to concrete vdevs, if possible. 3590 */ 3591 static void 3592 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 3593 { 3594 spa_t *spa = dmu_objset_spa(db->db_objset); 3595 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 3596 3597 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 3598 return; 3599 3600 if (db->db_level > 0) { 3601 blkptr_t *bp = db->db.db_data; 3602 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 3603 dbuf_remap_impl(dn, &bp[i], tx); 3604 } 3605 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 3606 dnode_phys_t *dnp = db->db.db_data; 3607 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 3608 DMU_OT_DNODE); 3609 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 3610 for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 3611 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx); 3612 } 3613 } 3614 } 3615 } 3616 3617 3618 /* Issue I/O to commit a dirty buffer to disk. 
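 * Three cases are handled below: a level-0 buffer whose dirty record was
 * overridden in open context (by dmu_sync() or dmu_buf_write_embedded())
 * reuses the already-written BP via zio_write_override(); a DB_NOFILL
 * buffer issues a data-less write; everything else goes through arc_write()
 * with the usual ready/physdone/done callbacks.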
*/ 3619 static void 3620 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 3621 { 3622 dmu_buf_impl_t *db = dr->dr_dbuf; 3623 dnode_t *dn; 3624 objset_t *os; 3625 dmu_buf_impl_t *parent = db->db_parent; 3626 uint64_t txg = tx->tx_txg; 3627 zbookmark_phys_t zb; 3628 zio_prop_t zp; 3629 zio_t *zio; 3630 int wp_flag = 0; 3631 3632 ASSERT(dmu_tx_is_syncing(tx)); 3633 3634 DB_DNODE_ENTER(db); 3635 dn = DB_DNODE(db); 3636 os = dn->dn_objset; 3637 3638 if (db->db_state != DB_NOFILL) { 3639 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 3640 /* 3641 * Private object buffers are released here rather 3642 * than in dbuf_dirty() since they are only modified 3643 * in the syncing context and we don't want the 3644 * overhead of making multiple copies of the data. 3645 */ 3646 if (BP_IS_HOLE(db->db_blkptr)) { 3647 arc_buf_thaw(data); 3648 } else { 3649 dbuf_release_bp(db); 3650 } 3651 dbuf_remap(dn, db, tx); 3652 } 3653 } 3654 3655 if (parent != dn->dn_dbuf) { 3656 /* Our parent is an indirect block. */ 3657 /* We have a dirty parent that has been scheduled for write. */ 3658 ASSERT(parent && parent->db_data_pending); 3659 /* Our parent's buffer is one level closer to the dnode. */ 3660 ASSERT(db->db_level == parent->db_level-1); 3661 /* 3662 * We're about to modify our parent's db_data by modifying 3663 * our block pointer, so the parent must be released. 3664 */ 3665 ASSERT(arc_released(parent->db_buf)); 3666 zio = parent->db_data_pending->dr_zio; 3667 } else { 3668 /* Our parent is the dnode itself. */ 3669 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 3670 db->db_blkid != DMU_SPILL_BLKID) || 3671 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 3672 if (db->db_blkid != DMU_SPILL_BLKID) 3673 ASSERT3P(db->db_blkptr, ==, 3674 &dn->dn_phys->dn_blkptr[db->db_blkid]); 3675 zio = dn->dn_zio; 3676 } 3677 3678 ASSERT(db->db_level == 0 || data == db->db_buf); 3679 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 3680 ASSERT(zio); 3681 3682 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 3683 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 3684 db->db.db_object, db->db_level, db->db_blkid); 3685 3686 if (db->db_blkid == DMU_SPILL_BLKID) 3687 wp_flag = WP_SPILL; 3688 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 3689 3690 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 3691 DB_DNODE_EXIT(db); 3692 3693 /* 3694 * We copy the blkptr now (rather than when we instantiate the dirty 3695 * record), because its value can change between open context and 3696 * syncing context. We do not need to hold dn_struct_rwlock to read 3697 * db_blkptr because we are in syncing context. 3698 */ 3699 dr->dr_bp_copy = *db->db_blkptr; 3700 3701 if (db->db_level == 0 && 3702 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 3703 /* 3704 * The BP for this block has been provided by open context 3705 * (by dmu_sync() or dmu_buf_write_embedded()). 3706 */ 3707 abd_t *contents = (data != NULL) ? 
3708 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 3709 3710 dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 3711 contents, db->db.db_size, db->db.db_size, &zp, 3712 dbuf_write_override_ready, NULL, NULL, 3713 dbuf_write_override_done, 3714 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3715 mutex_enter(&db->db_mtx); 3716 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 3717 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 3718 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 3719 mutex_exit(&db->db_mtx); 3720 } else if (db->db_state == DB_NOFILL) { 3721 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 3722 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 3723 dr->dr_zio = zio_write(zio, os->os_spa, txg, 3724 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 3725 dbuf_write_nofill_ready, NULL, NULL, 3726 dbuf_write_nofill_done, db, 3727 ZIO_PRIORITY_ASYNC_WRITE, 3728 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 3729 } else { 3730 ASSERT(arc_released(data)); 3731 3732 /* 3733 * For indirect blocks, we want to setup the children 3734 * ready callback so that we can properly handle an indirect 3735 * block that only contains holes. 3736 */ 3737 arc_done_func_t *children_ready_cb = NULL; 3738 if (db->db_level != 0) 3739 children_ready_cb = dbuf_write_children_ready; 3740 3741 dr->dr_zio = arc_write(zio, os->os_spa, txg, 3742 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 3743 &zp, dbuf_write_ready, children_ready_cb, 3744 dbuf_write_physdone, dbuf_write_done, db, 3745 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3746 } 3747 } 3748