1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/zfs_context.h> 27 #include <sys/dmu.h> 28 #include <sys/dmu_impl.h> 29 #include <sys/dbuf.h> 30 #include <sys/dmu_objset.h> 31 #include <sys/dsl_dataset.h> 32 #include <sys/dsl_dir.h> 33 #include <sys/dmu_tx.h> 34 #include <sys/spa.h> 35 #include <sys/zio.h> 36 #include <sys/dmu_zfetch.h> 37 38 static void dbuf_destroy(dmu_buf_impl_t *db); 39 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 40 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 41 static arc_done_func_t dbuf_write_ready; 42 static arc_done_func_t dbuf_write_done; 43 static zio_done_func_t dbuf_skip_write_ready; 44 static zio_done_func_t dbuf_skip_write_done; 45 46 /* 47 * Global data structures and functions for the dbuf cache. 48 */ 49 static kmem_cache_t *dbuf_cache; 50 51 /* ARGSUSED */ 52 static int 53 dbuf_cons(void *vdb, void *unused, int kmflag) 54 { 55 dmu_buf_impl_t *db = vdb; 56 bzero(db, sizeof (dmu_buf_impl_t)); 57 58 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); 59 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); 60 refcount_create(&db->db_holds); 61 return (0); 62 } 63 64 /* ARGSUSED */ 65 static void 66 dbuf_dest(void *vdb, void *unused) 67 { 68 dmu_buf_impl_t *db = vdb; 69 mutex_destroy(&db->db_mtx); 70 cv_destroy(&db->db_changed); 71 refcount_destroy(&db->db_holds); 72 } 73 74 /* 75 * dbuf hash table routines 76 */ 77 static dbuf_hash_table_t dbuf_hash_table; 78 79 static uint64_t dbuf_hash_count; 80 81 static uint64_t 82 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) 83 { 84 uintptr_t osv = (uintptr_t)os; 85 uint64_t crc = -1ULL; 86 87 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 88 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF]; 89 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; 90 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; 91 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; 92 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF]; 93 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF]; 94 95 crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16); 96 97 return (crc); 98 } 99 100 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid); 101 102 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 103 ((dbuf)->db.db_object == (obj) && \ 104 (dbuf)->db_objset == (os) && \ 105 (dbuf)->db_level == (level) && \ 106 (dbuf)->db_blkid == (blkid)) 107 108 dmu_buf_impl_t * 109 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid) 110 { 111 dbuf_hash_table_t *h = &dbuf_hash_table; 112 objset_impl_t *os = 
dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_impl_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}

/*
 * Remove an entry from the hash table. This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx, to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
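	 *
	 * (Illustrative note, not additional behavior: dbuf_find() and
	 * dbuf_hash_insert() above both take DBUF_HASH_MUTEX first and
	 * db_mtx second, so this function must not be entered with
	 * db_mtx held.)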
188 */ 189 ASSERT(refcount_is_zero(&db->db_holds)); 190 ASSERT(db->db_state == DB_EVICTING); 191 ASSERT(!MUTEX_HELD(&db->db_mtx)); 192 193 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 194 dbp = &h->hash_table[idx]; 195 while ((dbf = *dbp) != db) { 196 dbp = &dbf->db_hash_next; 197 ASSERT(dbf != NULL); 198 } 199 *dbp = db->db_hash_next; 200 db->db_hash_next = NULL; 201 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 202 atomic_add_64(&dbuf_hash_count, -1); 203 } 204 205 static arc_evict_func_t dbuf_do_evict; 206 207 static void 208 dbuf_evict_user(dmu_buf_impl_t *db) 209 { 210 ASSERT(MUTEX_HELD(&db->db_mtx)); 211 212 if (db->db_level != 0 || db->db_evict_func == NULL) 213 return; 214 215 if (db->db_user_data_ptr_ptr) 216 *db->db_user_data_ptr_ptr = db->db.db_data; 217 db->db_evict_func(&db->db, db->db_user_ptr); 218 db->db_user_ptr = NULL; 219 db->db_user_data_ptr_ptr = NULL; 220 db->db_evict_func = NULL; 221 } 222 223 void 224 dbuf_evict(dmu_buf_impl_t *db) 225 { 226 ASSERT(MUTEX_HELD(&db->db_mtx)); 227 ASSERT(db->db_buf == NULL); 228 ASSERT(db->db_data_pending == NULL); 229 230 dbuf_clear(db); 231 dbuf_destroy(db); 232 } 233 234 void 235 dbuf_init(void) 236 { 237 uint64_t hsize = 1ULL << 16; 238 dbuf_hash_table_t *h = &dbuf_hash_table; 239 int i; 240 241 /* 242 * The hash table is big enough to fill all of physical memory 243 * with an average 4K block size. The table will take up 244 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 245 */ 246 while (hsize * 4096 < physmem * PAGESIZE) 247 hsize <<= 1; 248 249 retry: 250 h->hash_table_mask = hsize - 1; 251 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 252 if (h->hash_table == NULL) { 253 /* XXX - we should really return an error instead of assert */ 254 ASSERT(hsize > (1ULL << 10)); 255 hsize >>= 1; 256 goto retry; 257 } 258 259 dbuf_cache = kmem_cache_create("dmu_buf_impl_t", 260 sizeof (dmu_buf_impl_t), 261 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 262 263 for (i = 0; i < DBUF_MUTEXES; i++) 264 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 265 } 266 267 void 268 dbuf_fini(void) 269 { 270 dbuf_hash_table_t *h = &dbuf_hash_table; 271 int i; 272 273 for (i = 0; i < DBUF_MUTEXES; i++) 274 mutex_destroy(&h->hash_mutexes[i]); 275 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 276 kmem_cache_destroy(dbuf_cache); 277 } 278 279 /* 280 * Other stuff. 281 */ 282 283 #ifdef ZFS_DEBUG 284 static void 285 dbuf_verify(dmu_buf_impl_t *db) 286 { 287 dnode_t *dn = db->db_dnode; 288 289 ASSERT(MUTEX_HELD(&db->db_mtx)); 290 291 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 292 return; 293 294 ASSERT(db->db_objset != NULL); 295 if (dn == NULL) { 296 ASSERT(db->db_parent == NULL); 297 ASSERT(db->db_blkptr == NULL); 298 } else { 299 ASSERT3U(db->db.db_object, ==, dn->dn_object); 300 ASSERT3P(db->db_objset, ==, dn->dn_objset); 301 ASSERT3U(db->db_level, <, dn->dn_nlevels); 302 ASSERT(db->db_blkid == DB_BONUS_BLKID || 303 list_head(&dn->dn_dbufs)); 304 } 305 if (db->db_blkid == DB_BONUS_BLKID) { 306 ASSERT(dn != NULL); 307 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 308 ASSERT3U(db->db.db_offset, ==, DB_BONUS_BLKID); 309 } else { 310 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 311 } 312 313 /* 314 * We can't assert that db_size matches dn_datablksz because it 315 * can be momentarily different when another thread is doing 316 * dnode_set_blksz(). 
317 */ 318 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 319 dbuf_dirty_record_t *dr = db->db_data_pending; 320 /* 321 * It should only be modified in syncing context, so 322 * make sure we only have one copy of the data. 323 */ 324 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 325 } 326 327 /* verify db->db_blkptr */ 328 if (db->db_blkptr) { 329 if (db->db_parent == dn->dn_dbuf) { 330 /* db is pointed to by the dnode */ 331 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 332 if (db->db.db_object == DMU_META_DNODE_OBJECT) 333 ASSERT(db->db_parent == NULL); 334 else 335 ASSERT(db->db_parent != NULL); 336 ASSERT3P(db->db_blkptr, ==, 337 &dn->dn_phys->dn_blkptr[db->db_blkid]); 338 } else { 339 /* db is pointed to by an indirect block */ 340 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 341 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 342 ASSERT3U(db->db_parent->db.db_object, ==, 343 db->db.db_object); 344 /* 345 * dnode_grow_indblksz() can make this fail if we don't 346 * have the struct_rwlock. XXX indblksz no longer 347 * grows. safe to do this now? 348 */ 349 if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) { 350 ASSERT3P(db->db_blkptr, ==, 351 ((blkptr_t *)db->db_parent->db.db_data + 352 db->db_blkid % epb)); 353 } 354 } 355 } 356 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 357 db->db.db_data && db->db_blkid != DB_BONUS_BLKID && 358 db->db_state != DB_FILL && !dn->dn_free_txg) { 359 /* 360 * If the blkptr isn't set but they have nonzero data, 361 * it had better be dirty, otherwise we'll lose that 362 * data when we evict this buffer. 363 */ 364 if (db->db_dirtycnt == 0) { 365 uint64_t *buf = db->db.db_data; 366 int i; 367 368 for (i = 0; i < db->db.db_size >> 3; i++) { 369 ASSERT(buf[i] == 0); 370 } 371 } 372 } 373 } 374 #endif 375 376 static void 377 dbuf_update_data(dmu_buf_impl_t *db) 378 { 379 ASSERT(MUTEX_HELD(&db->db_mtx)); 380 if (db->db_level == 0 && db->db_user_data_ptr_ptr) { 381 ASSERT(!refcount_is_zero(&db->db_holds)); 382 *db->db_user_data_ptr_ptr = db->db.db_data; 383 } 384 } 385 386 static void 387 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 388 { 389 ASSERT(MUTEX_HELD(&db->db_mtx)); 390 ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf)); 391 db->db_buf = buf; 392 if (buf != NULL) { 393 ASSERT(buf->b_data != NULL); 394 db->db.db_data = buf->b_data; 395 if (!arc_released(buf)) 396 arc_set_callback(buf, dbuf_do_evict, db); 397 dbuf_update_data(db); 398 } else { 399 dbuf_evict_user(db); 400 db->db.db_data = NULL; 401 if (db->db_state != DB_NOFILL) 402 db->db_state = DB_UNCACHED; 403 } 404 } 405 406 uint64_t 407 dbuf_whichblock(dnode_t *dn, uint64_t offset) 408 { 409 if (dn->dn_datablkshift) { 410 return (offset >> dn->dn_datablkshift); 411 } else { 412 ASSERT3U(offset, <, dn->dn_datablksz); 413 return (0); 414 } 415 } 416 417 static void 418 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) 419 { 420 dmu_buf_impl_t *db = vdb; 421 422 mutex_enter(&db->db_mtx); 423 ASSERT3U(db->db_state, ==, DB_READ); 424 /* 425 * All reads are synchronous, so we must have a hold on the dbuf 426 */ 427 ASSERT(refcount_count(&db->db_holds) > 0); 428 ASSERT(db->db_buf == NULL); 429 ASSERT(db->db.db_data == NULL); 430 if (db->db_level == 0 && db->db_freed_in_flight) { 431 /* we were freed in flight; disregard any error */ 432 arc_release(buf, db); 433 bzero(buf->b_data, db->db.db_size); 434 arc_buf_freeze(buf); 435 db->db_freed_in_flight = FALSE; 436 dbuf_set_data(db, buf); 437 db->db_state = DB_CACHED; 438 
} else if (zio == NULL || zio->io_error == 0) { 439 dbuf_set_data(db, buf); 440 db->db_state = DB_CACHED; 441 } else { 442 ASSERT(db->db_blkid != DB_BONUS_BLKID); 443 ASSERT3P(db->db_buf, ==, NULL); 444 VERIFY(arc_buf_remove_ref(buf, db) == 1); 445 db->db_state = DB_UNCACHED; 446 } 447 cv_broadcast(&db->db_changed); 448 mutex_exit(&db->db_mtx); 449 dbuf_rele(db, NULL); 450 } 451 452 static void 453 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags) 454 { 455 dnode_t *dn = db->db_dnode; 456 zbookmark_t zb; 457 uint32_t aflags = ARC_NOWAIT; 458 arc_buf_t *pbuf; 459 460 ASSERT(!refcount_is_zero(&db->db_holds)); 461 /* We need the struct_rwlock to prevent db_blkptr from changing. */ 462 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 463 ASSERT(MUTEX_HELD(&db->db_mtx)); 464 ASSERT(db->db_state == DB_UNCACHED); 465 ASSERT(db->db_buf == NULL); 466 467 if (db->db_blkid == DB_BONUS_BLKID) { 468 int bonuslen = dn->dn_bonuslen; 469 470 ASSERT3U(bonuslen, <=, db->db.db_size); 471 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); 472 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 473 if (bonuslen < DN_MAX_BONUSLEN) 474 bzero(db->db.db_data, DN_MAX_BONUSLEN); 475 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, 476 bonuslen); 477 dbuf_update_data(db); 478 db->db_state = DB_CACHED; 479 mutex_exit(&db->db_mtx); 480 return; 481 } 482 483 /* 484 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 485 * processes the delete record and clears the bp while we are waiting 486 * for the dn_mtx (resulting in a "no" from block_freed). 487 */ 488 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 489 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 490 BP_IS_HOLE(db->db_blkptr)))) { 491 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 492 493 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa, 494 db->db.db_size, db, type)); 495 bzero(db->db.db_data, db->db.db_size); 496 db->db_state = DB_CACHED; 497 *flags |= DB_RF_CACHED; 498 mutex_exit(&db->db_mtx); 499 return; 500 } 501 502 db->db_state = DB_READ; 503 mutex_exit(&db->db_mtx); 504 505 if (DBUF_IS_L2CACHEABLE(db)) 506 aflags |= ARC_L2CACHE; 507 508 zb.zb_objset = db->db_objset->os_dsl_dataset ? 509 db->db_objset->os_dsl_dataset->ds_object : 0; 510 zb.zb_object = db->db.db_object; 511 zb.zb_level = db->db_level; 512 zb.zb_blkid = db->db_blkid; 513 514 dbuf_add_ref(db, NULL); 515 /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */ 516 517 if (db->db_parent) 518 pbuf = db->db_parent->db_buf; 519 else 520 pbuf = db->db_objset->os_phys_buf; 521 522 (void) arc_read(zio, dn->dn_objset->os_spa, db->db_blkptr, pbuf, 523 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 524 (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 525 &aflags, &zb); 526 if (aflags & ARC_CACHED) 527 *flags |= DB_RF_CACHED; 528 } 529 530 int 531 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 532 { 533 int err = 0; 534 int havepzio = (zio != NULL); 535 int prefetch; 536 537 /* 538 * We don't have to hold the mutex to check db_state because it 539 * can't be freed while we have a hold on the buffer. 
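	 *
	 * A minimal caller sketch (illustrative only; it mirrors the
	 * pattern used by dmu_buf_hold() rather than adding a new one):
	 *
	 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	 *	db = dbuf_hold(dn, blkid, FTAG);
	 *	rw_exit(&dn->dn_struct_rwlock);
	 *	err = dbuf_read(db, NULL,
	 *	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
	 *	... use db->db.db_data ...
	 *	dbuf_rele(db, FTAG);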
540 */ 541 ASSERT(!refcount_is_zero(&db->db_holds)); 542 543 if (db->db_state == DB_NOFILL) 544 return (EIO); 545 546 if ((flags & DB_RF_HAVESTRUCT) == 0) 547 rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER); 548 549 prefetch = db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && 550 (flags & DB_RF_NOPREFETCH) == 0 && db->db_dnode != NULL && 551 DBUF_IS_CACHEABLE(db); 552 553 mutex_enter(&db->db_mtx); 554 if (db->db_state == DB_CACHED) { 555 mutex_exit(&db->db_mtx); 556 if (prefetch) 557 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, 558 db->db.db_size, TRUE); 559 if ((flags & DB_RF_HAVESTRUCT) == 0) 560 rw_exit(&db->db_dnode->dn_struct_rwlock); 561 } else if (db->db_state == DB_UNCACHED) { 562 if (zio == NULL) { 563 zio = zio_root(db->db_dnode->dn_objset->os_spa, 564 NULL, NULL, ZIO_FLAG_CANFAIL); 565 } 566 dbuf_read_impl(db, zio, &flags); 567 568 /* dbuf_read_impl has dropped db_mtx for us */ 569 570 if (prefetch) 571 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, 572 db->db.db_size, flags & DB_RF_CACHED); 573 574 if ((flags & DB_RF_HAVESTRUCT) == 0) 575 rw_exit(&db->db_dnode->dn_struct_rwlock); 576 577 if (!havepzio) 578 err = zio_wait(zio); 579 } else { 580 mutex_exit(&db->db_mtx); 581 if (prefetch) 582 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, 583 db->db.db_size, TRUE); 584 if ((flags & DB_RF_HAVESTRUCT) == 0) 585 rw_exit(&db->db_dnode->dn_struct_rwlock); 586 587 mutex_enter(&db->db_mtx); 588 if ((flags & DB_RF_NEVERWAIT) == 0) { 589 while (db->db_state == DB_READ || 590 db->db_state == DB_FILL) { 591 ASSERT(db->db_state == DB_READ || 592 (flags & DB_RF_HAVESTRUCT) == 0); 593 cv_wait(&db->db_changed, &db->db_mtx); 594 } 595 if (db->db_state == DB_UNCACHED) 596 err = EIO; 597 } 598 mutex_exit(&db->db_mtx); 599 } 600 601 ASSERT(err || havepzio || db->db_state == DB_CACHED); 602 return (err); 603 } 604 605 static void 606 dbuf_noread(dmu_buf_impl_t *db) 607 { 608 ASSERT(!refcount_is_zero(&db->db_holds)); 609 ASSERT(db->db_blkid != DB_BONUS_BLKID); 610 mutex_enter(&db->db_mtx); 611 while (db->db_state == DB_READ || db->db_state == DB_FILL) 612 cv_wait(&db->db_changed, &db->db_mtx); 613 if (db->db_state == DB_UNCACHED) { 614 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 615 616 ASSERT(db->db_buf == NULL); 617 ASSERT(db->db.db_data == NULL); 618 dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa, 619 db->db.db_size, db, type)); 620 db->db_state = DB_FILL; 621 } else if (db->db_state == DB_NOFILL) { 622 dbuf_set_data(db, NULL); 623 } else { 624 ASSERT3U(db->db_state, ==, DB_CACHED); 625 } 626 mutex_exit(&db->db_mtx); 627 } 628 629 /* 630 * This is our just-in-time copy function. It makes a copy of 631 * buffers, that have been modified in a previous transaction 632 * group, before we modify them in the current active group. 633 * 634 * This function is used in two places: when we are dirtying a 635 * buffer for the first time in a txg, and when we are freeing 636 * a range in a dnode that includes this buffer. 637 * 638 * Note that when we are called from dbuf_free_range() we do 639 * not put a hold on the buffer, we just traverse the active 640 * dbuf list for the dnode. 
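 *
 * In outline (an illustrative sketch of the cases handled below, not
 * additional behavior):
 *
 *	if the last dirty record still references the current data:
 *		bonus buffer		-> copy into a fresh zio_buf
 *		other active holders	-> copy into a fresh arc_buf
 *		no other holders	-> dbuf_set_data(db, NULL)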
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DB_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DB_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		dr->dt.dl.dr_data = arc_buf_alloc(
		    db->db_dnode->dn_objset->os_spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DB_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	/* free this block */
	if (!BP_IS_HOLE(&dr->dt.dl.dr_overridden_by)) {
		/* XXX can get silent EIO here */
		(void) dsl_free(NULL,
		    spa_get_dsl(db->db_dnode->dn_objset->os_spa),
		    txg, &dr->dt.dl.dr_overridden_by, NULL, NULL, ARC_WAIT);
	}
	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks. Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
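 *
 * Sketch of the walk over dn_dbufs below (illustrative only):
 *
 *	level-1 dbufs with blkid in [start >> epbs, end >> epbs] that are
 *	not yet dirty in this txg get dbuf_will_dirty();
 *	level-0 dbufs with blkid in [start, end] are undirtied if possible,
 *	otherwise cleared (if unreferenced) or zeroed in place (if held).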
720 */ 721 void 722 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx) 723 { 724 dmu_buf_impl_t *db, *db_next; 725 uint64_t txg = tx->tx_txg; 726 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 727 uint64_t first_l1 = start >> epbs; 728 uint64_t last_l1 = end >> epbs; 729 730 if (end > dn->dn_maxblkid) { 731 end = dn->dn_maxblkid; 732 last_l1 = end >> epbs; 733 } 734 dprintf_dnode(dn, "start=%llu end=%llu\n", start, end); 735 mutex_enter(&dn->dn_dbufs_mtx); 736 for (db = list_head(&dn->dn_dbufs); db; db = db_next) { 737 db_next = list_next(&dn->dn_dbufs, db); 738 ASSERT(db->db_blkid != DB_BONUS_BLKID); 739 740 if (db->db_level == 1 && 741 db->db_blkid >= first_l1 && db->db_blkid <= last_l1) { 742 mutex_enter(&db->db_mtx); 743 if (db->db_last_dirty && 744 db->db_last_dirty->dr_txg < txg) { 745 dbuf_add_ref(db, FTAG); 746 mutex_exit(&db->db_mtx); 747 dbuf_will_dirty(db, tx); 748 dbuf_rele(db, FTAG); 749 } else { 750 mutex_exit(&db->db_mtx); 751 } 752 } 753 754 if (db->db_level != 0) 755 continue; 756 dprintf_dbuf(db, "found buf %s\n", ""); 757 if (db->db_blkid < start || db->db_blkid > end) 758 continue; 759 760 /* found a level 0 buffer in the range */ 761 if (dbuf_undirty(db, tx)) 762 continue; 763 764 mutex_enter(&db->db_mtx); 765 if (db->db_state == DB_UNCACHED || 766 db->db_state == DB_NOFILL || 767 db->db_state == DB_EVICTING) { 768 ASSERT(db->db.db_data == NULL); 769 mutex_exit(&db->db_mtx); 770 continue; 771 } 772 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 773 /* will be handled in dbuf_read_done or dbuf_rele */ 774 db->db_freed_in_flight = TRUE; 775 mutex_exit(&db->db_mtx); 776 continue; 777 } 778 if (refcount_count(&db->db_holds) == 0) { 779 ASSERT(db->db_buf); 780 dbuf_clear(db); 781 continue; 782 } 783 /* The dbuf is referenced */ 784 785 if (db->db_last_dirty != NULL) { 786 dbuf_dirty_record_t *dr = db->db_last_dirty; 787 788 if (dr->dr_txg == txg) { 789 /* 790 * This buffer is "in-use", re-adjust the file 791 * size to reflect that this buffer may 792 * contain new data when we sync. 793 */ 794 if (db->db_blkid > dn->dn_maxblkid) 795 dn->dn_maxblkid = db->db_blkid; 796 dbuf_unoverride(dr); 797 } else { 798 /* 799 * This dbuf is not dirty in the open context. 800 * Either uncache it (if its not referenced in 801 * the open context) or reset its contents to 802 * empty. 803 */ 804 dbuf_fix_old_data(db, txg); 805 } 806 } 807 /* clear the contents if its cached */ 808 if (db->db_state == DB_CACHED) { 809 ASSERT(db->db.db_data != NULL); 810 arc_release(db->db_buf, db); 811 bzero(db->db.db_data, db->db.db_size); 812 arc_buf_freeze(db->db_buf); 813 } 814 815 mutex_exit(&db->db_mtx); 816 } 817 mutex_exit(&dn->dn_dbufs_mtx); 818 } 819 820 static int 821 dbuf_block_freeable(dmu_buf_impl_t *db) 822 { 823 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset; 824 uint64_t birth_txg = 0; 825 826 /* 827 * We don't need any locking to protect db_blkptr: 828 * If it's syncing, then db_last_dirty will be set 829 * so we'll ignore db_blkptr. 
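	 *
	 * Net effect (a sketch of the code below): take the birth txg
	 * from the last dirty record if there is one, otherwise from
	 * db_blkptr; a zero birth txg means the block does not exist on
	 * disk, so there is nothing that could be freed.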
830 */ 831 ASSERT(MUTEX_HELD(&db->db_mtx)); 832 if (db->db_last_dirty) 833 birth_txg = db->db_last_dirty->dr_txg; 834 else if (db->db_blkptr) 835 birth_txg = db->db_blkptr->blk_birth; 836 837 /* If we don't exist or are in a snapshot, we can't be freed */ 838 if (birth_txg) 839 return (ds == NULL || 840 dsl_dataset_block_freeable(ds, birth_txg)); 841 else 842 return (FALSE); 843 } 844 845 void 846 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 847 { 848 arc_buf_t *buf, *obuf; 849 int osize = db->db.db_size; 850 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 851 852 ASSERT(db->db_blkid != DB_BONUS_BLKID); 853 854 /* XXX does *this* func really need the lock? */ 855 ASSERT(RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)); 856 857 /* 858 * This call to dbuf_will_dirty() with the dn_struct_rwlock held 859 * is OK, because there can be no other references to the db 860 * when we are changing its size, so no concurrent DB_FILL can 861 * be happening. 862 */ 863 /* 864 * XXX we should be doing a dbuf_read, checking the return 865 * value and returning that up to our callers 866 */ 867 dbuf_will_dirty(db, tx); 868 869 /* create the data buffer for the new block */ 870 buf = arc_buf_alloc(db->db_dnode->dn_objset->os_spa, size, db, type); 871 872 /* copy old block data to the new block */ 873 obuf = db->db_buf; 874 bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 875 /* zero the remainder */ 876 if (size > osize) 877 bzero((uint8_t *)buf->b_data + osize, size - osize); 878 879 mutex_enter(&db->db_mtx); 880 dbuf_set_data(db, buf); 881 VERIFY(arc_buf_remove_ref(obuf, db) == 1); 882 db->db.db_size = size; 883 884 if (db->db_level == 0) { 885 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 886 db->db_last_dirty->dt.dl.dr_data = buf; 887 } 888 mutex_exit(&db->db_mtx); 889 890 dnode_willuse_space(db->db_dnode, size-osize, tx); 891 } 892 893 dbuf_dirty_record_t * 894 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 895 { 896 dnode_t *dn = db->db_dnode; 897 objset_impl_t *os = dn->dn_objset; 898 dbuf_dirty_record_t **drp, *dr; 899 int drop_struct_lock = FALSE; 900 boolean_t do_free_accounting = B_FALSE; 901 int txgoff = tx->tx_txg & TXG_MASK; 902 903 ASSERT(tx->tx_txg != 0); 904 ASSERT(!refcount_is_zero(&db->db_holds)); 905 DMU_TX_DIRTY_BUF(tx, db); 906 907 /* 908 * Shouldn't dirty a regular buffer in syncing context. Private 909 * objects may be dirtied in syncing context, but only if they 910 * were already pre-dirtied in open context. 911 * XXX We may want to prohibit dirtying in syncing context even 912 * if they did pre-dirty. 913 */ 914 ASSERT(!dmu_tx_is_syncing(tx) || 915 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 916 dn->dn_object == DMU_META_DNODE_OBJECT || 917 dn->dn_objset->os_dsl_dataset == NULL || 918 dsl_dir_is_private(dn->dn_objset->os_dsl_dataset->ds_dir)); 919 920 /* 921 * We make this assert for private objects as well, but after we 922 * check if we're already dirty. They are allowed to re-dirty 923 * in syncing context. 924 */ 925 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 926 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 927 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 928 929 mutex_enter(&db->db_mtx); 930 /* 931 * XXX make this true for indirects too? The problem is that 932 * transactions created with dmu_tx_create_assigned() from 933 * syncing context don't bother holding ahead. 
934 */ 935 ASSERT(db->db_level != 0 || 936 db->db_state == DB_CACHED || db->db_state == DB_FILL || 937 db->db_state == DB_NOFILL); 938 939 mutex_enter(&dn->dn_mtx); 940 /* 941 * Don't set dirtyctx to SYNC if we're just modifying this as we 942 * initialize the objset. 943 */ 944 if (dn->dn_dirtyctx == DN_UNDIRTIED && 945 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 946 dn->dn_dirtyctx = 947 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); 948 ASSERT(dn->dn_dirtyctx_firstset == NULL); 949 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 950 } 951 mutex_exit(&dn->dn_mtx); 952 953 /* 954 * If this buffer is already dirty, we're done. 955 */ 956 drp = &db->db_last_dirty; 957 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 958 db->db.db_object == DMU_META_DNODE_OBJECT); 959 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 960 drp = &dr->dr_next; 961 if (dr && dr->dr_txg == tx->tx_txg) { 962 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { 963 /* 964 * If this buffer has already been written out, 965 * we now need to reset its state. 966 */ 967 dbuf_unoverride(dr); 968 if (db->db.db_object != DMU_META_DNODE_OBJECT) 969 arc_buf_thaw(db->db_buf); 970 } 971 mutex_exit(&db->db_mtx); 972 return (dr); 973 } 974 975 /* 976 * Only valid if not already dirty. 977 */ 978 ASSERT(dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 979 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 980 981 ASSERT3U(dn->dn_nlevels, >, db->db_level); 982 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 983 dn->dn_phys->dn_nlevels > db->db_level || 984 dn->dn_next_nlevels[txgoff] > db->db_level || 985 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 986 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 987 988 /* 989 * We should only be dirtying in syncing context if it's the 990 * mos, a spa os, or we're initializing the os. However, we are 991 * allowed to dirty in syncing context provided we already 992 * dirtied it in open context. Hence we must make this 993 * assertion only if we're not already dirty. 994 */ 995 ASSERT(!dmu_tx_is_syncing(tx) || 996 os->os_dsl_dataset == NULL || 997 !dsl_dir_is_private(os->os_dsl_dataset->ds_dir) || 998 !BP_IS_HOLE(os->os_rootbp)); 999 ASSERT(db->db.db_size != 0); 1000 1001 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1002 1003 if (db->db_blkid != DB_BONUS_BLKID) { 1004 /* 1005 * Update the accounting. 1006 * Note: we delay "free accounting" until after we drop 1007 * the db_mtx. This keeps us from grabbing other locks 1008 * (and possibly deadlocking) in bp_get_dasize() while 1009 * also holding the db_mtx. 1010 */ 1011 dnode_willuse_space(dn, db->db.db_size, tx); 1012 do_free_accounting = dbuf_block_freeable(db); 1013 } 1014 1015 /* 1016 * If this buffer is dirty in an old transaction group we need 1017 * to make a copy of it so that the changes we make in this 1018 * transaction group won't leak out when we sync the older txg. 1019 */ 1020 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1021 if (db->db_level == 0) { 1022 void *data_old = db->db_buf; 1023 1024 if (db->db_state != DB_NOFILL) { 1025 if (db->db_blkid == DB_BONUS_BLKID) { 1026 dbuf_fix_old_data(db, tx->tx_txg); 1027 data_old = db->db.db_data; 1028 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1029 /* 1030 * Release the data buffer from the cache so 1031 * that we can modify it without impacting 1032 * possible other users of this cached data 1033 * block. 
Note that indirect blocks and 1034 * private objects are not released until the 1035 * syncing state (since they are only modified 1036 * then). 1037 */ 1038 arc_release(db->db_buf, db); 1039 dbuf_fix_old_data(db, tx->tx_txg); 1040 data_old = db->db_buf; 1041 } 1042 ASSERT(data_old != NULL); 1043 } 1044 dr->dt.dl.dr_data = data_old; 1045 } else { 1046 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1047 list_create(&dr->dt.di.dr_children, 1048 sizeof (dbuf_dirty_record_t), 1049 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1050 } 1051 dr->dr_dbuf = db; 1052 dr->dr_txg = tx->tx_txg; 1053 dr->dr_next = *drp; 1054 *drp = dr; 1055 1056 /* 1057 * We could have been freed_in_flight between the dbuf_noread 1058 * and dbuf_dirty. We win, as though the dbuf_noread() had 1059 * happened after the free. 1060 */ 1061 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { 1062 mutex_enter(&dn->dn_mtx); 1063 dnode_clear_range(dn, db->db_blkid, 1, tx); 1064 mutex_exit(&dn->dn_mtx); 1065 db->db_freed_in_flight = FALSE; 1066 } 1067 1068 /* 1069 * This buffer is now part of this txg 1070 */ 1071 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1072 db->db_dirtycnt += 1; 1073 ASSERT3U(db->db_dirtycnt, <=, 3); 1074 1075 mutex_exit(&db->db_mtx); 1076 1077 if (db->db_blkid == DB_BONUS_BLKID) { 1078 mutex_enter(&dn->dn_mtx); 1079 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1080 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1081 mutex_exit(&dn->dn_mtx); 1082 dnode_setdirty(dn, tx); 1083 return (dr); 1084 } else if (do_free_accounting) { 1085 blkptr_t *bp = db->db_blkptr; 1086 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ? 1087 bp_get_dasize(os->os_spa, bp) : db->db.db_size; 1088 /* 1089 * This is only a guess -- if the dbuf is dirty 1090 * in a previous txg, we don't know how much 1091 * space it will use on disk yet. We should 1092 * really have the struct_rwlock to access 1093 * db_blkptr, but since this is just a guess, 1094 * it's OK if we get an odd answer. 
1095 */ 1096 dnode_willuse_space(dn, -willfree, tx); 1097 } 1098 1099 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 1100 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1101 drop_struct_lock = TRUE; 1102 } 1103 1104 if (db->db_level == 0) { 1105 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 1106 ASSERT(dn->dn_maxblkid >= db->db_blkid); 1107 } 1108 1109 if (db->db_level+1 < dn->dn_nlevels) { 1110 dmu_buf_impl_t *parent = db->db_parent; 1111 dbuf_dirty_record_t *di; 1112 int parent_held = FALSE; 1113 1114 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1115 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1116 1117 parent = dbuf_hold_level(dn, db->db_level+1, 1118 db->db_blkid >> epbs, FTAG); 1119 parent_held = TRUE; 1120 } 1121 if (drop_struct_lock) 1122 rw_exit(&dn->dn_struct_rwlock); 1123 ASSERT3U(db->db_level+1, ==, parent->db_level); 1124 di = dbuf_dirty(parent, tx); 1125 if (parent_held) 1126 dbuf_rele(parent, FTAG); 1127 1128 mutex_enter(&db->db_mtx); 1129 /* possible race with dbuf_undirty() */ 1130 if (db->db_last_dirty == dr || 1131 dn->dn_object == DMU_META_DNODE_OBJECT) { 1132 mutex_enter(&di->dt.di.dr_mtx); 1133 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1134 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1135 list_insert_tail(&di->dt.di.dr_children, dr); 1136 mutex_exit(&di->dt.di.dr_mtx); 1137 dr->dr_parent = di; 1138 } 1139 mutex_exit(&db->db_mtx); 1140 } else { 1141 ASSERT(db->db_level+1 == dn->dn_nlevels); 1142 ASSERT(db->db_blkid < dn->dn_nblkptr); 1143 ASSERT(db->db_parent == NULL || 1144 db->db_parent == db->db_dnode->dn_dbuf); 1145 mutex_enter(&dn->dn_mtx); 1146 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1147 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1148 mutex_exit(&dn->dn_mtx); 1149 if (drop_struct_lock) 1150 rw_exit(&dn->dn_struct_rwlock); 1151 } 1152 1153 dnode_setdirty(dn, tx); 1154 return (dr); 1155 } 1156 1157 static int 1158 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1159 { 1160 dnode_t *dn = db->db_dnode; 1161 uint64_t txg = tx->tx_txg; 1162 dbuf_dirty_record_t *dr, **drp; 1163 1164 ASSERT(txg != 0); 1165 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1166 1167 mutex_enter(&db->db_mtx); 1168 1169 /* 1170 * If this buffer is not dirty, we're done. 1171 */ 1172 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1173 if (dr->dr_txg <= txg) 1174 break; 1175 if (dr == NULL || dr->dr_txg < txg) { 1176 mutex_exit(&db->db_mtx); 1177 return (0); 1178 } 1179 ASSERT(dr->dr_txg == txg); 1180 1181 /* 1182 * If this buffer is currently held, we cannot undirty 1183 * it, since one of the current holders may be in the 1184 * middle of an update. Note that users of dbuf_undirty() 1185 * should not place a hold on the dbuf before the call. 
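	 *
	 * Concretely (a sketch of the check below): "currently held"
	 * means refcount_count(&db->db_holds) > db->db_dirtycnt, i.e.
	 * there is at least one hold beyond the per-txg holds that
	 * dbuf_dirty() itself added.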
1186 */ 1187 if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 1188 mutex_exit(&db->db_mtx); 1189 /* Make sure we don't toss this buffer at sync phase */ 1190 mutex_enter(&dn->dn_mtx); 1191 dnode_clear_range(dn, db->db_blkid, 1, tx); 1192 mutex_exit(&dn->dn_mtx); 1193 return (0); 1194 } 1195 1196 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1197 1198 ASSERT(db->db.db_size != 0); 1199 1200 /* XXX would be nice to fix up dn_towrite_space[] */ 1201 1202 *drp = dr->dr_next; 1203 1204 if (dr->dr_parent) { 1205 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1206 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1207 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1208 } else if (db->db_level+1 == dn->dn_nlevels) { 1209 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1210 mutex_enter(&dn->dn_mtx); 1211 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1212 mutex_exit(&dn->dn_mtx); 1213 } 1214 1215 if (db->db_level == 0) { 1216 if (db->db_state != DB_NOFILL) { 1217 dbuf_unoverride(dr); 1218 1219 ASSERT(db->db_buf != NULL); 1220 ASSERT(dr->dt.dl.dr_data != NULL); 1221 if (dr->dt.dl.dr_data != db->db_buf) 1222 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, 1223 db) == 1); 1224 } 1225 } else { 1226 ASSERT(db->db_buf != NULL); 1227 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 1228 mutex_destroy(&dr->dt.di.dr_mtx); 1229 list_destroy(&dr->dt.di.dr_children); 1230 } 1231 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1232 1233 ASSERT(db->db_dirtycnt > 0); 1234 db->db_dirtycnt -= 1; 1235 1236 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1237 arc_buf_t *buf = db->db_buf; 1238 1239 ASSERT(arc_released(buf)); 1240 dbuf_set_data(db, NULL); 1241 VERIFY(arc_buf_remove_ref(buf, db) == 1); 1242 dbuf_evict(db); 1243 return (1); 1244 } 1245 1246 mutex_exit(&db->db_mtx); 1247 return (0); 1248 } 1249 1250 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty 1251 void 1252 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1253 { 1254 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1255 1256 ASSERT(tx->tx_txg != 0); 1257 ASSERT(!refcount_is_zero(&db->db_holds)); 1258 1259 if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) 1260 rf |= DB_RF_HAVESTRUCT; 1261 (void) dbuf_read(db, NULL, rf); 1262 (void) dbuf_dirty(db, tx); 1263 } 1264 1265 void 1266 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1267 { 1268 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1269 1270 db->db_state = DB_NOFILL; 1271 1272 dmu_buf_will_fill(db_fake, tx); 1273 } 1274 1275 void 1276 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1277 { 1278 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1279 1280 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1281 ASSERT(tx->tx_txg != 0); 1282 ASSERT(db->db_level == 0); 1283 ASSERT(!refcount_is_zero(&db->db_holds)); 1284 1285 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1286 dmu_tx_private_ok(tx)); 1287 1288 dbuf_noread(db); 1289 (void) dbuf_dirty(db, tx); 1290 } 1291 1292 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1293 /* ARGSUSED */ 1294 void 1295 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1296 { 1297 mutex_enter(&db->db_mtx); 1298 DBUF_VERIFY(db); 1299 1300 if (db->db_state == DB_FILL) { 1301 if (db->db_level == 0 && db->db_freed_in_flight) { 1302 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1303 /* we were freed while filling */ 1304 /* XXX dbuf_undirty? 
*/
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * "Clear" the contents of this dbuf. This will mark the dbuf
 * EVICTING and clear *most* of its references. Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
 * in this case. For callers from the DMU we will usually see:
 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 * DMU: dbuf_clear()->arc_buf_evict()
 * ARC: dbuf_do_evict()->dbuf_destroy()
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn = db->db_dnode;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb = dn->dn_dbuf;
	int dbuf_gone = FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DB_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	if (db->db_blkid != DB_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		list_remove(&dn->dn_dbufs, db);
		dnode_rele(dn, db);
		db->db_dnode = NULL;
	}

	if (db->db_buf)
		dbuf_gone = arc_buf_evict(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
1371 */ 1372 if (parent && parent != dndb) 1373 dbuf_rele(parent, db); 1374 } 1375 1376 static int 1377 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 1378 dmu_buf_impl_t **parentp, blkptr_t **bpp) 1379 { 1380 int nlevels, epbs; 1381 1382 *parentp = NULL; 1383 *bpp = NULL; 1384 1385 ASSERT(blkid != DB_BONUS_BLKID); 1386 1387 if (dn->dn_phys->dn_nlevels == 0) 1388 nlevels = 1; 1389 else 1390 nlevels = dn->dn_phys->dn_nlevels; 1391 1392 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1393 1394 ASSERT3U(level * epbs, <, 64); 1395 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1396 if (level >= nlevels || 1397 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 1398 /* the buffer has no parent yet */ 1399 return (ENOENT); 1400 } else if (level < nlevels-1) { 1401 /* this block is referenced from an indirect block */ 1402 int err = dbuf_hold_impl(dn, level+1, 1403 blkid >> epbs, fail_sparse, NULL, parentp); 1404 if (err) 1405 return (err); 1406 err = dbuf_read(*parentp, NULL, 1407 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 1408 if (err) { 1409 dbuf_rele(*parentp, NULL); 1410 *parentp = NULL; 1411 return (err); 1412 } 1413 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 1414 (blkid & ((1ULL << epbs) - 1)); 1415 return (0); 1416 } else { 1417 /* the block is referenced from the dnode */ 1418 ASSERT3U(level, ==, nlevels-1); 1419 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 1420 blkid < dn->dn_phys->dn_nblkptr); 1421 if (dn->dn_dbuf) { 1422 dbuf_add_ref(dn->dn_dbuf, NULL); 1423 *parentp = dn->dn_dbuf; 1424 } 1425 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 1426 return (0); 1427 } 1428 } 1429 1430 static dmu_buf_impl_t * 1431 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 1432 dmu_buf_impl_t *parent, blkptr_t *blkptr) 1433 { 1434 objset_impl_t *os = dn->dn_objset; 1435 dmu_buf_impl_t *db, *odb; 1436 1437 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1438 ASSERT(dn->dn_type != DMU_OT_NONE); 1439 1440 db = kmem_cache_alloc(dbuf_cache, KM_SLEEP); 1441 1442 db->db_objset = os; 1443 db->db.db_object = dn->dn_object; 1444 db->db_level = level; 1445 db->db_blkid = blkid; 1446 db->db_last_dirty = NULL; 1447 db->db_dirtycnt = 0; 1448 db->db_dnode = dn; 1449 db->db_parent = parent; 1450 db->db_blkptr = blkptr; 1451 1452 db->db_user_ptr = NULL; 1453 db->db_user_data_ptr_ptr = NULL; 1454 db->db_evict_func = NULL; 1455 db->db_immediate_evict = 0; 1456 db->db_freed_in_flight = 0; 1457 1458 if (blkid == DB_BONUS_BLKID) { 1459 ASSERT3P(parent, ==, dn->dn_dbuf); 1460 db->db.db_size = DN_MAX_BONUSLEN - 1461 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 1462 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 1463 db->db.db_offset = DB_BONUS_BLKID; 1464 db->db_state = DB_UNCACHED; 1465 /* the bonus dbuf is not placed in the hash table */ 1466 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1467 return (db); 1468 } else { 1469 int blocksize = 1470 db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz; 1471 db->db.db_size = blocksize; 1472 db->db.db_offset = db->db_blkid * blocksize; 1473 } 1474 1475 /* 1476 * Hold the dn_dbufs_mtx while we get the new dbuf 1477 * in the hash table *and* added to the dbufs list. 1478 * This prevents a possible deadlock with someone 1479 * trying to look up this dbuf before its added to the 1480 * dn_dbufs list. 
1481 */ 1482 mutex_enter(&dn->dn_dbufs_mtx); 1483 db->db_state = DB_EVICTING; 1484 if ((odb = dbuf_hash_insert(db)) != NULL) { 1485 /* someone else inserted it first */ 1486 kmem_cache_free(dbuf_cache, db); 1487 mutex_exit(&dn->dn_dbufs_mtx); 1488 return (odb); 1489 } 1490 list_insert_head(&dn->dn_dbufs, db); 1491 db->db_state = DB_UNCACHED; 1492 mutex_exit(&dn->dn_dbufs_mtx); 1493 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1494 1495 if (parent && parent != dn->dn_dbuf) 1496 dbuf_add_ref(parent, db); 1497 1498 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1499 refcount_count(&dn->dn_holds) > 0); 1500 (void) refcount_add(&dn->dn_holds, db); 1501 1502 dprintf_dbuf(db, "db=%p\n", db); 1503 1504 return (db); 1505 } 1506 1507 static int 1508 dbuf_do_evict(void *private) 1509 { 1510 arc_buf_t *buf = private; 1511 dmu_buf_impl_t *db = buf->b_private; 1512 1513 if (!MUTEX_HELD(&db->db_mtx)) 1514 mutex_enter(&db->db_mtx); 1515 1516 ASSERT(refcount_is_zero(&db->db_holds)); 1517 1518 if (db->db_state != DB_EVICTING) { 1519 ASSERT(db->db_state == DB_CACHED); 1520 DBUF_VERIFY(db); 1521 db->db_buf = NULL; 1522 dbuf_evict(db); 1523 } else { 1524 mutex_exit(&db->db_mtx); 1525 dbuf_destroy(db); 1526 } 1527 return (0); 1528 } 1529 1530 static void 1531 dbuf_destroy(dmu_buf_impl_t *db) 1532 { 1533 ASSERT(refcount_is_zero(&db->db_holds)); 1534 1535 if (db->db_blkid != DB_BONUS_BLKID) { 1536 /* 1537 * If this dbuf is still on the dn_dbufs list, 1538 * remove it from that list. 1539 */ 1540 if (db->db_dnode) { 1541 dnode_t *dn = db->db_dnode; 1542 1543 mutex_enter(&dn->dn_dbufs_mtx); 1544 list_remove(&dn->dn_dbufs, db); 1545 mutex_exit(&dn->dn_dbufs_mtx); 1546 1547 dnode_rele(dn, db); 1548 db->db_dnode = NULL; 1549 } 1550 dbuf_hash_remove(db); 1551 } 1552 db->db_parent = NULL; 1553 db->db_buf = NULL; 1554 1555 ASSERT(!list_link_active(&db->db_link)); 1556 ASSERT(db->db.db_data == NULL); 1557 ASSERT(db->db_hash_next == NULL); 1558 ASSERT(db->db_blkptr == NULL); 1559 ASSERT(db->db_data_pending == NULL); 1560 1561 kmem_cache_free(dbuf_cache, db); 1562 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1563 } 1564 1565 void 1566 dbuf_prefetch(dnode_t *dn, uint64_t blkid) 1567 { 1568 dmu_buf_impl_t *db = NULL; 1569 blkptr_t *bp = NULL; 1570 1571 ASSERT(blkid != DB_BONUS_BLKID); 1572 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1573 1574 if (dnode_block_freed(dn, blkid)) 1575 return; 1576 1577 /* dbuf_find() returns with db_mtx held */ 1578 if (db = dbuf_find(dn, 0, blkid)) { 1579 if (refcount_count(&db->db_holds) > 0) { 1580 /* 1581 * This dbuf is active. We assume that it is 1582 * already CACHED, or else about to be either 1583 * read or filled. 1584 */ 1585 mutex_exit(&db->db_mtx); 1586 return; 1587 } 1588 mutex_exit(&db->db_mtx); 1589 db = NULL; 1590 } 1591 1592 if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) { 1593 if (bp && !BP_IS_HOLE(bp)) { 1594 arc_buf_t *pbuf; 1595 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; 1596 zbookmark_t zb; 1597 zb.zb_objset = dn->dn_objset->os_dsl_dataset ? 
1598 dn->dn_objset->os_dsl_dataset->ds_object : 0; 1599 zb.zb_object = dn->dn_object; 1600 zb.zb_level = 0; 1601 zb.zb_blkid = blkid; 1602 1603 if (db) 1604 pbuf = db->db_buf; 1605 else 1606 pbuf = dn->dn_objset->os_phys_buf; 1607 1608 (void) arc_read(NULL, dn->dn_objset->os_spa, 1609 bp, pbuf, NULL, NULL, ZIO_PRIORITY_ASYNC_READ, 1610 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 1611 &aflags, &zb); 1612 } 1613 if (db) 1614 dbuf_rele(db, NULL); 1615 } 1616 } 1617 1618 /* 1619 * Returns with db_holds incremented, and db_mtx not held. 1620 * Note: dn_struct_rwlock must be held. 1621 */ 1622 int 1623 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, 1624 void *tag, dmu_buf_impl_t **dbp) 1625 { 1626 dmu_buf_impl_t *db, *parent = NULL; 1627 1628 ASSERT(blkid != DB_BONUS_BLKID); 1629 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1630 ASSERT3U(dn->dn_nlevels, >, level); 1631 1632 *dbp = NULL; 1633 top: 1634 /* dbuf_find() returns with db_mtx held */ 1635 db = dbuf_find(dn, level, blkid); 1636 1637 if (db == NULL) { 1638 blkptr_t *bp = NULL; 1639 int err; 1640 1641 ASSERT3P(parent, ==, NULL); 1642 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 1643 if (fail_sparse) { 1644 if (err == 0 && bp && BP_IS_HOLE(bp)) 1645 err = ENOENT; 1646 if (err) { 1647 if (parent) 1648 dbuf_rele(parent, NULL); 1649 return (err); 1650 } 1651 } 1652 if (err && err != ENOENT) 1653 return (err); 1654 db = dbuf_create(dn, level, blkid, parent, bp); 1655 } 1656 1657 if (db->db_buf && refcount_is_zero(&db->db_holds)) { 1658 arc_buf_add_ref(db->db_buf, db); 1659 if (db->db_buf->b_data == NULL) { 1660 dbuf_clear(db); 1661 if (parent) { 1662 dbuf_rele(parent, NULL); 1663 parent = NULL; 1664 } 1665 goto top; 1666 } 1667 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 1668 } 1669 1670 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 1671 1672 /* 1673 * If this buffer is currently syncing out, and we are are 1674 * still referencing it from db_data, we need to make a copy 1675 * of it in case we decide we want to dirty it again in this txg. 1676 */ 1677 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && 1678 dn->dn_object != DMU_META_DNODE_OBJECT && 1679 db->db_state == DB_CACHED && db->db_data_pending) { 1680 dbuf_dirty_record_t *dr = db->db_data_pending; 1681 1682 if (dr->dt.dl.dr_data == db->db_buf) { 1683 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1684 1685 dbuf_set_data(db, 1686 arc_buf_alloc(db->db_dnode->dn_objset->os_spa, 1687 db->db.db_size, db, type)); 1688 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 1689 db->db.db_size); 1690 } 1691 } 1692 1693 (void) refcount_add(&db->db_holds, tag); 1694 dbuf_update_data(db); 1695 DBUF_VERIFY(db); 1696 mutex_exit(&db->db_mtx); 1697 1698 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 1699 if (parent) 1700 dbuf_rele(parent, NULL); 1701 1702 ASSERT3P(db->db_dnode, ==, dn); 1703 ASSERT3U(db->db_blkid, ==, blkid); 1704 ASSERT3U(db->db_level, ==, level); 1705 *dbp = db; 1706 1707 return (0); 1708 } 1709 1710 dmu_buf_impl_t * 1711 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 1712 { 1713 dmu_buf_impl_t *db; 1714 int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db); 1715 return (err ? NULL : db); 1716 } 1717 1718 dmu_buf_impl_t * 1719 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 1720 { 1721 dmu_buf_impl_t *db; 1722 int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db); 1723 return (err ? 
NULL : db); 1724 } 1725 1726 void 1727 dbuf_create_bonus(dnode_t *dn) 1728 { 1729 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1730 1731 ASSERT(dn->dn_bonus == NULL); 1732 dn->dn_bonus = dbuf_create(dn, 0, DB_BONUS_BLKID, dn->dn_dbuf, NULL); 1733 } 1734 1735 #pragma weak dmu_buf_add_ref = dbuf_add_ref 1736 void 1737 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 1738 { 1739 int64_t holds = refcount_add(&db->db_holds, tag); 1740 ASSERT(holds > 1); 1741 } 1742 1743 #pragma weak dmu_buf_rele = dbuf_rele 1744 void 1745 dbuf_rele(dmu_buf_impl_t *db, void *tag) 1746 { 1747 int64_t holds; 1748 1749 mutex_enter(&db->db_mtx); 1750 DBUF_VERIFY(db); 1751 1752 holds = refcount_remove(&db->db_holds, tag); 1753 ASSERT(holds >= 0); 1754 1755 /* 1756 * We can't freeze indirects if there is a possibility that they 1757 * may be modified in the current syncing context. 1758 */ 1759 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) 1760 arc_buf_freeze(db->db_buf); 1761 1762 if (holds == db->db_dirtycnt && 1763 db->db_level == 0 && db->db_immediate_evict) 1764 dbuf_evict_user(db); 1765 1766 if (holds == 0) { 1767 if (db->db_blkid == DB_BONUS_BLKID) { 1768 mutex_exit(&db->db_mtx); 1769 dnode_rele(db->db_dnode, db); 1770 } else if (db->db_buf == NULL) { 1771 /* 1772 * This is a special case: we never associated this 1773 * dbuf with any data allocated from the ARC. 1774 */ 1775 ASSERT(db->db_state == DB_UNCACHED || 1776 db->db_state == DB_NOFILL); 1777 dbuf_evict(db); 1778 } else if (arc_released(db->db_buf)) { 1779 arc_buf_t *buf = db->db_buf; 1780 /* 1781 * This dbuf has anonymous data associated with it. 1782 */ 1783 dbuf_set_data(db, NULL); 1784 VERIFY(arc_buf_remove_ref(buf, db) == 1); 1785 dbuf_evict(db); 1786 } else { 1787 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0); 1788 if (!DBUF_IS_CACHEABLE(db)) 1789 dbuf_clear(db); 1790 else 1791 mutex_exit(&db->db_mtx); 1792 } 1793 } else { 1794 mutex_exit(&db->db_mtx); 1795 } 1796 } 1797 1798 #pragma weak dmu_buf_refcount = dbuf_refcount 1799 uint64_t 1800 dbuf_refcount(dmu_buf_impl_t *db) 1801 { 1802 return (refcount_count(&db->db_holds)); 1803 } 1804 1805 void * 1806 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, 1807 dmu_buf_evict_func_t *evict_func) 1808 { 1809 return (dmu_buf_update_user(db_fake, NULL, user_ptr, 1810 user_data_ptr_ptr, evict_func)); 1811 } 1812 1813 void * 1814 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, 1815 dmu_buf_evict_func_t *evict_func) 1816 { 1817 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1818 1819 db->db_immediate_evict = TRUE; 1820 return (dmu_buf_update_user(db_fake, NULL, user_ptr, 1821 user_data_ptr_ptr, evict_func)); 1822 } 1823 1824 void * 1825 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr, 1826 void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func) 1827 { 1828 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1829 ASSERT(db->db_level == 0); 1830 1831 ASSERT((user_ptr == NULL) == (evict_func == NULL)); 1832 1833 mutex_enter(&db->db_mtx); 1834 1835 if (db->db_user_ptr == old_user_ptr) { 1836 db->db_user_ptr = user_ptr; 1837 db->db_user_data_ptr_ptr = user_data_ptr_ptr; 1838 db->db_evict_func = evict_func; 1839 1840 dbuf_update_data(db); 1841 } else { 1842 old_user_ptr = db->db_user_ptr; 1843 } 1844 1845 mutex_exit(&db->db_mtx); 1846 return (old_user_ptr); 1847 } 1848 1849 void * 1850 dmu_buf_get_user(dmu_buf_t *db_fake) 1851 { 1852 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1853 
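	/*
	 * Illustrative note (hypothetical caller-side names): a consumer
	 * typically registers state while holding the dbuf with
	 * dmu_buf_set_user(db, my_state, &my_state->data, my_evict_cb),
	 * retrieves it later through this function, and relies on
	 * my_evict_cb being invoked via dbuf_evict_user() when the dbuf
	 * is evicted.
	 */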
ASSERT(!refcount_is_zero(&db->db_holds)); 1854 1855 return (db->db_user_ptr); 1856 } 1857 1858 static void 1859 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 1860 { 1861 /* ASSERT(dmu_tx_is_syncing(tx) */ 1862 ASSERT(MUTEX_HELD(&db->db_mtx)); 1863 1864 if (db->db_blkptr != NULL) 1865 return; 1866 1867 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 1868 /* 1869 * This buffer was allocated at a time when there was 1870 * no available blkptrs from the dnode, or it was 1871 * inappropriate to hook it in (i.e., nlevels mis-match). 1872 */ 1873 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 1874 ASSERT(db->db_parent == NULL); 1875 db->db_parent = dn->dn_dbuf; 1876 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 1877 DBUF_VERIFY(db); 1878 } else { 1879 dmu_buf_impl_t *parent = db->db_parent; 1880 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 1881 1882 ASSERT(dn->dn_phys->dn_nlevels > 1); 1883 if (parent == NULL) { 1884 mutex_exit(&db->db_mtx); 1885 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1886 (void) dbuf_hold_impl(dn, db->db_level+1, 1887 db->db_blkid >> epbs, FALSE, db, &parent); 1888 rw_exit(&dn->dn_struct_rwlock); 1889 mutex_enter(&db->db_mtx); 1890 db->db_parent = parent; 1891 } 1892 db->db_blkptr = (blkptr_t *)parent->db.db_data + 1893 (db->db_blkid & ((1ULL << epbs) - 1)); 1894 DBUF_VERIFY(db); 1895 } 1896 } 1897 1898 static void 1899 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 1900 { 1901 dmu_buf_impl_t *db = dr->dr_dbuf; 1902 dnode_t *dn = db->db_dnode; 1903 zio_t *zio; 1904 1905 ASSERT(dmu_tx_is_syncing(tx)); 1906 1907 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 1908 1909 mutex_enter(&db->db_mtx); 1910 1911 ASSERT(db->db_level > 0); 1912 DBUF_VERIFY(db); 1913 1914 if (db->db_buf == NULL) { 1915 mutex_exit(&db->db_mtx); 1916 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 1917 mutex_enter(&db->db_mtx); 1918 } 1919 ASSERT3U(db->db_state, ==, DB_CACHED); 1920 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 1921 ASSERT(db->db_buf != NULL); 1922 1923 dbuf_check_blkptr(dn, db); 1924 1925 db->db_data_pending = dr; 1926 1927 mutex_exit(&db->db_mtx); 1928 dbuf_write(dr, db->db_buf, tx); 1929 1930 zio = dr->dr_zio; 1931 mutex_enter(&dr->dt.di.dr_mtx); 1932 dbuf_sync_list(&dr->dt.di.dr_children, tx); 1933 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 1934 mutex_exit(&dr->dt.di.dr_mtx); 1935 zio_nowait(zio); 1936 } 1937 1938 static void 1939 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 1940 { 1941 arc_buf_t **datap = &dr->dt.dl.dr_data; 1942 dmu_buf_impl_t *db = dr->dr_dbuf; 1943 dnode_t *dn = db->db_dnode; 1944 objset_impl_t *os = dn->dn_objset; 1945 uint64_t txg = tx->tx_txg; 1946 1947 ASSERT(dmu_tx_is_syncing(tx)); 1948 1949 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 1950 1951 mutex_enter(&db->db_mtx); 1952 /* 1953 * To be synced, we must be dirtied. But we 1954 * might have been freed after the dirty. 1955 */ 1956 if (db->db_state == DB_UNCACHED) { 1957 /* This buffer has been freed since it was dirtied */ 1958 ASSERT(db->db.db_data == NULL); 1959 } else if (db->db_state == DB_FILL) { 1960 /* This buffer was freed and is now being re-filled */ 1961 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 1962 } else { 1963 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 1964 } 1965 DBUF_VERIFY(db); 1966 1967 /* 1968 * If this is a bonus buffer, simply copy the bonus data into the 1969 * dnode. 
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DB_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT3U(db->db_level, ==, 0);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		*drp = dr->dr_next;
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)txg);
		return;
	}

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	/*
	 * If this dbuf has already been written out via an immediate write,
	 * just complete the write by copying over the new block pointer and
	 * updating the accounting via the write-completion functions.
	 */
	if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		zio_t zio_fake;

		zio_fake.io_private = &db;
		zio_fake.io_error = 0;
		zio_fake.io_bp = db->db_blkptr;
		zio_fake.io_bp_orig = *db->db_blkptr;
		zio_fake.io_txg = txg;
		zio_fake.io_flags = 0;

		*db->db_blkptr = dr->dt.dl.dr_overridden_by;
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		db->db_data_pending = dr;
		dr->dr_zio = &zio_fake;
		mutex_exit(&db->db_mtx);

		ASSERT(!DVA_EQUAL(BP_IDENTITY(zio_fake.io_bp),
		    BP_IDENTITY(&zio_fake.io_bp_orig)) ||
		    BP_IS_HOLE(zio_fake.io_bp));

		if (BP_IS_OLDER(&zio_fake.io_bp_orig, txg))
			(void) dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio_fake.io_bp_orig, dn->dn_zio, tx);

		dbuf_write_ready(&zio_fake, db->db_buf, db);
		dbuf_write_done(&zio_fake, db->db_buf, db);

		return;
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * the meta-dnode's blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT)
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
	else
		zio_nowait(dr->dr_zio);
}
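
/*
 * Sync every dirty record on "list".  Indirect dbufs go through
 * dbuf_sync_indirect(), which recurses into their children; level-0
 * dbufs go through dbuf_sync_leaf().  A record that already has a zio
 * belongs to the meta-dnode and ends the walk (see the comment in the
 * loop below).
 */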
void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}
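
/*
 * Issue the write for a single dirty record.  The new I/O is created
 * as a child of the parent dbuf's pending zio (or of the dnode's zio
 * when this dbuf sits directly under the dnode), so the parent cannot
 * complete until all of its children do.  NOFILL buffers bypass the
 * ARC via zio_write() with the dbuf_skip_write_* callbacks; all other
 * buffers go through arc_write() with dbuf_write_ready/dbuf_write_done.
 */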
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	writeprops_t wp = { 0 };
	zio_t *zio;

	if (!BP_IS_HOLE(db->db_blkptr) &&
	    (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE)) {
		/*
		 * Private object buffers are released here rather
		 * than in dbuf_dirty() since they are only modified
		 * in the syncing context and we don't want the
		 * overhead of making multiple copies of the data.
		 */
		arc_release(data, db);
	} else if (db->db_state != DB_NOFILL) {
		ASSERT(arc_released(data));
		/* XXX why do we need to thaw here? */
		arc_buf_thaw(data);
	}

	if (parent != dn->dn_dbuf) {
		ASSERT(parent && parent->db_data_pending);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		ASSERT(db->db_level == dn->dn_phys->dn_nlevels-1);
		ASSERT3P(db->db_blkptr, ==,
		    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	zb.zb_objset = os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;

	wp.wp_type = dn->dn_type;
	wp.wp_level = db->db_level;
	wp.wp_copies = os->os_copies;
	wp.wp_dncompress = dn->dn_compress;
	wp.wp_oscompress = os->os_compress;
	wp.wp_dnchecksum = dn->dn_checksum;
	wp.wp_oschecksum = os->os_checksum;

	if (BP_IS_OLDER(db->db_blkptr, txg))
		(void) dsl_dataset_block_kill(
		    os->os_dsl_dataset, db->db_blkptr, zio, tx);

	if (db->db_state == DB_NOFILL) {
		zio_prop_t zp = { 0 };

		write_policy(os->os_spa, &wp, &zp);
		dr->dr_zio = zio_write(zio, os->os_spa,
		    txg, db->db_blkptr, NULL,
		    db->db.db_size, &zp, dbuf_skip_write_ready,
		    dbuf_skip_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED, &zb);
	} else {
		dr->dr_zio = arc_write(zio, os->os_spa, &wp,
		    DBUF_IS_L2CACHEABLE(db), txg, db->db_blkptr,
		    data, dbuf_write_ready, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

/* wrapper function for dbuf_write_ready bypassing ARC */
static void
dbuf_skip_write_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (!BP_IS_GANG(bp))
		zio_skip_write(zio);

	dbuf_write_ready(zio, NULL, zio->io_private);
}

/* wrapper function for dbuf_write_done bypassing ARC */
static void
dbuf_skip_write_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}
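
/*
 * Write-completion callbacks.  dbuf_write_ready() runs once the block
 * pointer for the write is known: it charges the space delta to the
 * dnode, updates dn_maxblkid and the block's fill count, and records
 * the block's birth (or frees the old block) in the dataset.
 * dbuf_write_done() then tears down the dirty record after the I/O
 * has fully completed.
 */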
/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t fill = 0;
	int old_size, new_size, i;

	ASSERT(db->db_blkptr == bp);

	dprintf_dbuf_bp(db, bp_orig, "bp_orig: %s", "");

	old_size = bp_get_dasize(os->os_spa, bp_orig);
	new_size = bp_get_dasize(os->os_spa, bp);

	dnode_diduse_space(dn, new_size - old_size);

	if (BP_IS_HOLE(bp)) {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		if (bp_orig->blk_birth == tx->tx_txg)
			(void) dsl_dataset_block_kill(ds, bp_orig, zio, tx);
		ASSERT3U(bp->blk_fill, ==, 0);
		return;
	}

	ASSERT(BP_GET_TYPE(bp) == dn->dn_type);
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			ASSERT3U(BP_GET_LSIZE(ibp), ==,
			    db->db_level == 1 ? dn->dn_datablksz :
			    (1<<dn->dn_phys->dn_indblkshift));
			fill += ibp->blk_fill;
		}
	}

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(DVA_EQUAL(BP_IDENTITY(bp), BP_IDENTITY(bp_orig)));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		if (bp_orig->blk_birth == tx->tx_txg)
			(void) dsl_dataset_block_kill(ds, bp_orig, zio, tx);
		dsl_dataset_block_born(ds, bp, tx);
	}
}

/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT3U(zio->io_error, ==, 0);

	mutex_enter(&db->db_mtx);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DB_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);

		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
			else if (!BP_IS_HOLE(db->db_blkptr))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
			else
				ASSERT(arc_released(db->db_buf));
		}
	} else {
		dnode_t *dn = db->db_dnode;

		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	mutex_exit(&db->db_mtx);

	dprintf_dbuf_bp(db, zio->io_bp, "bp: %s", "");

	dbuf_rele(db, (void *)(uintptr_t)txg);
}