1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/zfs_context.h> 27 #include <sys/dmu.h> 28 #include <sys/dmu_impl.h> 29 #include <sys/dbuf.h> 30 #include <sys/dmu_objset.h> 31 #include <sys/dsl_dataset.h> 32 #include <sys/dsl_dir.h> 33 #include <sys/dmu_tx.h> 34 #include <sys/spa.h> 35 #include <sys/zio.h> 36 #include <sys/dmu_zfetch.h> 37 38 static void dbuf_destroy(dmu_buf_impl_t *db); 39 static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); 40 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); 41 static arc_done_func_t dbuf_write_ready; 42 static arc_done_func_t dbuf_write_done; 43 static zio_done_func_t dbuf_skip_write_ready; 44 static zio_done_func_t dbuf_skip_write_done; 45 46 /* 47 * Global data structures and functions for the dbuf cache. 48 */ 49 static kmem_cache_t *dbuf_cache; 50 51 /* ARGSUSED */ 52 static int 53 dbuf_cons(void *vdb, void *unused, int kmflag) 54 { 55 dmu_buf_impl_t *db = vdb; 56 bzero(db, sizeof (dmu_buf_impl_t)); 57 58 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL); 59 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL); 60 refcount_create(&db->db_holds); 61 return (0); 62 } 63 64 /* ARGSUSED */ 65 static void 66 dbuf_dest(void *vdb, void *unused) 67 { 68 dmu_buf_impl_t *db = vdb; 69 mutex_destroy(&db->db_mtx); 70 cv_destroy(&db->db_changed); 71 refcount_destroy(&db->db_holds); 72 } 73 74 /* 75 * dbuf hash table routines 76 */ 77 static dbuf_hash_table_t dbuf_hash_table; 78 79 static uint64_t dbuf_hash_count; 80 81 static uint64_t 82 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid) 83 { 84 uintptr_t osv = (uintptr_t)os; 85 uint64_t crc = -1ULL; 86 87 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 88 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF]; 89 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF]; 90 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF]; 91 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF]; 92 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF]; 93 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF]; 94 95 crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16); 96 97 return (crc); 98 } 99 100 #define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid); 101 102 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \ 103 ((dbuf)->db.db_object == (obj) && \ 104 (dbuf)->db_objset == (os) && \ 105 (dbuf)->db_level == (level) && \ 106 (dbuf)->db_blkid == (blkid)) 107 108 dmu_buf_impl_t * 109 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid) 110 { 111 dbuf_hash_table_t *h = &dbuf_hash_table; 112 objset_impl_t *os = 
dn->dn_objset; 113 uint64_t obj = dn->dn_object; 114 uint64_t hv = DBUF_HASH(os, obj, level, blkid); 115 uint64_t idx = hv & h->hash_table_mask; 116 dmu_buf_impl_t *db; 117 118 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 119 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { 120 if (DBUF_EQUAL(db, os, obj, level, blkid)) { 121 mutex_enter(&db->db_mtx); 122 if (db->db_state != DB_EVICTING) { 123 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 124 return (db); 125 } 126 mutex_exit(&db->db_mtx); 127 } 128 } 129 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 130 return (NULL); 131 } 132 133 /* 134 * Insert an entry into the hash table. If there is already an element 135 * equal to elem in the hash table, then the already existing element 136 * will be returned and the new element will not be inserted. 137 * Otherwise returns NULL. 138 */ 139 static dmu_buf_impl_t * 140 dbuf_hash_insert(dmu_buf_impl_t *db) 141 { 142 dbuf_hash_table_t *h = &dbuf_hash_table; 143 objset_impl_t *os = db->db_objset; 144 uint64_t obj = db->db.db_object; 145 int level = db->db_level; 146 uint64_t blkid = db->db_blkid; 147 uint64_t hv = DBUF_HASH(os, obj, level, blkid); 148 uint64_t idx = hv & h->hash_table_mask; 149 dmu_buf_impl_t *dbf; 150 151 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 152 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { 153 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { 154 mutex_enter(&dbf->db_mtx); 155 if (dbf->db_state != DB_EVICTING) { 156 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 157 return (dbf); 158 } 159 mutex_exit(&dbf->db_mtx); 160 } 161 } 162 163 mutex_enter(&db->db_mtx); 164 db->db_hash_next = h->hash_table[idx]; 165 h->hash_table[idx] = db; 166 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 167 atomic_add_64(&dbuf_hash_count, 1); 168 169 return (NULL); 170 } 171 172 /* 173 * Remove an entry from the hash table. This operation will 174 * fail if there are any existing holds on the db. 175 */ 176 static void 177 dbuf_hash_remove(dmu_buf_impl_t *db) 178 { 179 dbuf_hash_table_t *h = &dbuf_hash_table; 180 uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object, 181 db->db_level, db->db_blkid); 182 uint64_t idx = hv & h->hash_table_mask; 183 dmu_buf_impl_t *dbf, **dbp; 184 185 /* 186 * We musn't hold db_mtx to maintin lock ordering: 187 * DBUF_HASH_MUTEX > db_mtx. 
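 * (dbuf_find() and dbuf_hash_insert() above take the hash mutex first
 * and then db_mtx, so acquiring db_mtx before the hash mutex here
 * could deadlock with a concurrent lookup.)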
188 */ 189 ASSERT(refcount_is_zero(&db->db_holds)); 190 ASSERT(db->db_state == DB_EVICTING); 191 ASSERT(!MUTEX_HELD(&db->db_mtx)); 192 193 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 194 dbp = &h->hash_table[idx]; 195 while ((dbf = *dbp) != db) { 196 dbp = &dbf->db_hash_next; 197 ASSERT(dbf != NULL); 198 } 199 *dbp = db->db_hash_next; 200 db->db_hash_next = NULL; 201 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 202 atomic_add_64(&dbuf_hash_count, -1); 203 } 204 205 static arc_evict_func_t dbuf_do_evict; 206 207 static void 208 dbuf_evict_user(dmu_buf_impl_t *db) 209 { 210 ASSERT(MUTEX_HELD(&db->db_mtx)); 211 212 if (db->db_level != 0 || db->db_evict_func == NULL) 213 return; 214 215 if (db->db_user_data_ptr_ptr) 216 *db->db_user_data_ptr_ptr = db->db.db_data; 217 db->db_evict_func(&db->db, db->db_user_ptr); 218 db->db_user_ptr = NULL; 219 db->db_user_data_ptr_ptr = NULL; 220 db->db_evict_func = NULL; 221 } 222 223 void 224 dbuf_evict(dmu_buf_impl_t *db) 225 { 226 ASSERT(MUTEX_HELD(&db->db_mtx)); 227 ASSERT(db->db_buf == NULL); 228 ASSERT(db->db_data_pending == NULL); 229 230 dbuf_clear(db); 231 dbuf_destroy(db); 232 } 233 234 void 235 dbuf_init(void) 236 { 237 uint64_t hsize = 1ULL << 16; 238 dbuf_hash_table_t *h = &dbuf_hash_table; 239 int i; 240 241 /* 242 * The hash table is big enough to fill all of physical memory 243 * with an average 4K block size. The table will take up 244 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 245 */ 246 while (hsize * 4096 < physmem * PAGESIZE) 247 hsize <<= 1; 248 249 retry: 250 h->hash_table_mask = hsize - 1; 251 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 252 if (h->hash_table == NULL) { 253 /* XXX - we should really return an error instead of assert */ 254 ASSERT(hsize > (1ULL << 10)); 255 hsize >>= 1; 256 goto retry; 257 } 258 259 dbuf_cache = kmem_cache_create("dmu_buf_impl_t", 260 sizeof (dmu_buf_impl_t), 261 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 262 263 for (i = 0; i < DBUF_MUTEXES; i++) 264 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 265 } 266 267 void 268 dbuf_fini(void) 269 { 270 dbuf_hash_table_t *h = &dbuf_hash_table; 271 int i; 272 273 for (i = 0; i < DBUF_MUTEXES; i++) 274 mutex_destroy(&h->hash_mutexes[i]); 275 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 276 kmem_cache_destroy(dbuf_cache); 277 } 278 279 /* 280 * Other stuff. 281 */ 282 283 #ifdef ZFS_DEBUG 284 static void 285 dbuf_verify(dmu_buf_impl_t *db) 286 { 287 dnode_t *dn = db->db_dnode; 288 289 ASSERT(MUTEX_HELD(&db->db_mtx)); 290 291 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 292 return; 293 294 ASSERT(db->db_objset != NULL); 295 if (dn == NULL) { 296 ASSERT(db->db_parent == NULL); 297 ASSERT(db->db_blkptr == NULL); 298 } else { 299 ASSERT3U(db->db.db_object, ==, dn->dn_object); 300 ASSERT3P(db->db_objset, ==, dn->dn_objset); 301 ASSERT3U(db->db_level, <, dn->dn_nlevels); 302 ASSERT(db->db_blkid == DB_BONUS_BLKID || 303 list_head(&dn->dn_dbufs)); 304 } 305 if (db->db_blkid == DB_BONUS_BLKID) { 306 ASSERT(dn != NULL); 307 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 308 ASSERT3U(db->db.db_offset, ==, DB_BONUS_BLKID); 309 } else { 310 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 311 } 312 313 /* 314 * We can't assert that db_size matches dn_datablksz because it 315 * can be momentarily different when another thread is doing 316 * dnode_set_blksz(). 
317 */ 318 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 319 dbuf_dirty_record_t *dr = db->db_data_pending; 320 /* 321 * It should only be modified in syncing context, so 322 * make sure we only have one copy of the data. 323 */ 324 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 325 } 326 327 /* verify db->db_blkptr */ 328 if (db->db_blkptr) { 329 if (db->db_parent == dn->dn_dbuf) { 330 /* db is pointed to by the dnode */ 331 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 332 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 333 ASSERT(db->db_parent == NULL); 334 else 335 ASSERT(db->db_parent != NULL); 336 ASSERT3P(db->db_blkptr, ==, 337 &dn->dn_phys->dn_blkptr[db->db_blkid]); 338 } else { 339 /* db is pointed to by an indirect block */ 340 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 341 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 342 ASSERT3U(db->db_parent->db.db_object, ==, 343 db->db.db_object); 344 /* 345 * dnode_grow_indblksz() can make this fail if we don't 346 * have the struct_rwlock. XXX indblksz no longer 347 * grows. safe to do this now? 348 */ 349 if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) { 350 ASSERT3P(db->db_blkptr, ==, 351 ((blkptr_t *)db->db_parent->db.db_data + 352 db->db_blkid % epb)); 353 } 354 } 355 } 356 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 357 db->db.db_data && db->db_blkid != DB_BONUS_BLKID && 358 db->db_state != DB_FILL && !dn->dn_free_txg) { 359 /* 360 * If the blkptr isn't set but they have nonzero data, 361 * it had better be dirty, otherwise we'll lose that 362 * data when we evict this buffer. 363 */ 364 if (db->db_dirtycnt == 0) { 365 uint64_t *buf = db->db.db_data; 366 int i; 367 368 for (i = 0; i < db->db.db_size >> 3; i++) { 369 ASSERT(buf[i] == 0); 370 } 371 } 372 } 373 } 374 #endif 375 376 static void 377 dbuf_update_data(dmu_buf_impl_t *db) 378 { 379 ASSERT(MUTEX_HELD(&db->db_mtx)); 380 if (db->db_level == 0 && db->db_user_data_ptr_ptr) { 381 ASSERT(!refcount_is_zero(&db->db_holds)); 382 *db->db_user_data_ptr_ptr = db->db.db_data; 383 } 384 } 385 386 static void 387 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 388 { 389 ASSERT(MUTEX_HELD(&db->db_mtx)); 390 ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf)); 391 db->db_buf = buf; 392 if (buf != NULL) { 393 ASSERT(buf->b_data != NULL); 394 db->db.db_data = buf->b_data; 395 if (!arc_released(buf)) 396 arc_set_callback(buf, dbuf_do_evict, db); 397 dbuf_update_data(db); 398 } else { 399 dbuf_evict_user(db); 400 db->db.db_data = NULL; 401 if (db->db_state != DB_NOFILL) 402 db->db_state = DB_UNCACHED; 403 } 404 } 405 406 uint64_t 407 dbuf_whichblock(dnode_t *dn, uint64_t offset) 408 { 409 if (dn->dn_datablkshift) { 410 return (offset >> dn->dn_datablkshift); 411 } else { 412 ASSERT3U(offset, <, dn->dn_datablksz); 413 return (0); 414 } 415 } 416 417 static void 418 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) 419 { 420 dmu_buf_impl_t *db = vdb; 421 422 mutex_enter(&db->db_mtx); 423 ASSERT3U(db->db_state, ==, DB_READ); 424 /* 425 * All reads are synchronous, so we must have a hold on the dbuf 426 */ 427 ASSERT(refcount_count(&db->db_holds) > 0); 428 ASSERT(db->db_buf == NULL); 429 ASSERT(db->db.db_data == NULL); 430 if (db->db_level == 0 && db->db_freed_in_flight) { 431 /* we were freed in flight; disregard any error */ 432 arc_release(buf, db); 433 bzero(buf->b_data, db->db.db_size); 434 arc_buf_freeze(buf); 435 db->db_freed_in_flight = FALSE; 436 dbuf_set_data(db, buf); 437 db->db_state = DB_CACHED; 438 } 
else if (zio == NULL || zio->io_error == 0) { 439 dbuf_set_data(db, buf); 440 db->db_state = DB_CACHED; 441 } else { 442 ASSERT(db->db_blkid != DB_BONUS_BLKID); 443 ASSERT3P(db->db_buf, ==, NULL); 444 VERIFY(arc_buf_remove_ref(buf, db) == 1); 445 db->db_state = DB_UNCACHED; 446 } 447 cv_broadcast(&db->db_changed); 448 mutex_exit(&db->db_mtx); 449 dbuf_rele(db, NULL); 450 } 451 452 static void 453 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags) 454 { 455 dnode_t *dn = db->db_dnode; 456 zbookmark_t zb; 457 uint32_t aflags = ARC_NOWAIT; 458 arc_buf_t *pbuf; 459 460 ASSERT(!refcount_is_zero(&db->db_holds)); 461 /* We need the struct_rwlock to prevent db_blkptr from changing. */ 462 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 463 ASSERT(MUTEX_HELD(&db->db_mtx)); 464 ASSERT(db->db_state == DB_UNCACHED); 465 ASSERT(db->db_buf == NULL); 466 467 if (db->db_blkid == DB_BONUS_BLKID) { 468 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 469 470 ASSERT3U(bonuslen, <=, db->db.db_size); 471 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); 472 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 473 if (bonuslen < DN_MAX_BONUSLEN) 474 bzero(db->db.db_data, DN_MAX_BONUSLEN); 475 if (bonuslen) 476 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 477 dbuf_update_data(db); 478 db->db_state = DB_CACHED; 479 mutex_exit(&db->db_mtx); 480 return; 481 } 482 483 /* 484 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 485 * processes the delete record and clears the bp while we are waiting 486 * for the dn_mtx (resulting in a "no" from block_freed). 487 */ 488 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 489 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 490 BP_IS_HOLE(db->db_blkptr)))) { 491 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 492 493 dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa, 494 db->db.db_size, db, type)); 495 bzero(db->db.db_data, db->db.db_size); 496 db->db_state = DB_CACHED; 497 *flags |= DB_RF_CACHED; 498 mutex_exit(&db->db_mtx); 499 return; 500 } 501 502 db->db_state = DB_READ; 503 mutex_exit(&db->db_mtx); 504 505 if (DBUF_IS_L2CACHEABLE(db)) 506 aflags |= ARC_L2CACHE; 507 508 zb.zb_objset = db->db_objset->os_dsl_dataset ? 509 db->db_objset->os_dsl_dataset->ds_object : 0; 510 zb.zb_object = db->db.db_object; 511 zb.zb_level = db->db_level; 512 zb.zb_blkid = db->db_blkid; 513 514 dbuf_add_ref(db, NULL); 515 /* ZIO_FLAG_CANFAIL callers have to check the parent zio's error */ 516 517 if (db->db_parent) 518 pbuf = db->db_parent->db_buf; 519 else 520 pbuf = db->db_objset->os_phys_buf; 521 522 (void) arc_read(zio, dn->dn_objset->os_spa, db->db_blkptr, pbuf, 523 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 524 (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 525 &aflags, &zb); 526 if (aflags & ARC_CACHED) 527 *flags |= DB_RF_CACHED; 528 } 529 530 int 531 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 532 { 533 int err = 0; 534 int havepzio = (zio != NULL); 535 int prefetch; 536 537 /* 538 * We don't have to hold the mutex to check db_state because it 539 * can't be freed while we have a hold on the buffer. 
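	 * The hold only guarantees the dbuf itself stays around; db_state
	 * may still change underneath us, which is why the DB_READ/DB_FILL
	 * cases below re-take db_mtx before waiting on db_changed.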
540 */ 541 ASSERT(!refcount_is_zero(&db->db_holds)); 542 543 if (db->db_state == DB_NOFILL) 544 return (EIO); 545 546 if ((flags & DB_RF_HAVESTRUCT) == 0) 547 rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER); 548 549 prefetch = db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && 550 (flags & DB_RF_NOPREFETCH) == 0 && db->db_dnode != NULL && 551 DBUF_IS_CACHEABLE(db); 552 553 mutex_enter(&db->db_mtx); 554 if (db->db_state == DB_CACHED) { 555 mutex_exit(&db->db_mtx); 556 if (prefetch) 557 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, 558 db->db.db_size, TRUE); 559 if ((flags & DB_RF_HAVESTRUCT) == 0) 560 rw_exit(&db->db_dnode->dn_struct_rwlock); 561 } else if (db->db_state == DB_UNCACHED) { 562 if (zio == NULL) { 563 zio = zio_root(db->db_dnode->dn_objset->os_spa, 564 NULL, NULL, ZIO_FLAG_CANFAIL); 565 } 566 dbuf_read_impl(db, zio, &flags); 567 568 /* dbuf_read_impl has dropped db_mtx for us */ 569 570 if (prefetch) 571 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, 572 db->db.db_size, flags & DB_RF_CACHED); 573 574 if ((flags & DB_RF_HAVESTRUCT) == 0) 575 rw_exit(&db->db_dnode->dn_struct_rwlock); 576 577 if (!havepzio) 578 err = zio_wait(zio); 579 } else { 580 mutex_exit(&db->db_mtx); 581 if (prefetch) 582 dmu_zfetch(&db->db_dnode->dn_zfetch, db->db.db_offset, 583 db->db.db_size, TRUE); 584 if ((flags & DB_RF_HAVESTRUCT) == 0) 585 rw_exit(&db->db_dnode->dn_struct_rwlock); 586 587 mutex_enter(&db->db_mtx); 588 if ((flags & DB_RF_NEVERWAIT) == 0) { 589 while (db->db_state == DB_READ || 590 db->db_state == DB_FILL) { 591 ASSERT(db->db_state == DB_READ || 592 (flags & DB_RF_HAVESTRUCT) == 0); 593 cv_wait(&db->db_changed, &db->db_mtx); 594 } 595 if (db->db_state == DB_UNCACHED) 596 err = EIO; 597 } 598 mutex_exit(&db->db_mtx); 599 } 600 601 ASSERT(err || havepzio || db->db_state == DB_CACHED); 602 return (err); 603 } 604 605 static void 606 dbuf_noread(dmu_buf_impl_t *db) 607 { 608 ASSERT(!refcount_is_zero(&db->db_holds)); 609 ASSERT(db->db_blkid != DB_BONUS_BLKID); 610 mutex_enter(&db->db_mtx); 611 while (db->db_state == DB_READ || db->db_state == DB_FILL) 612 cv_wait(&db->db_changed, &db->db_mtx); 613 if (db->db_state == DB_UNCACHED) { 614 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 615 616 ASSERT(db->db_buf == NULL); 617 ASSERT(db->db.db_data == NULL); 618 dbuf_set_data(db, arc_buf_alloc(db->db_dnode->dn_objset->os_spa, 619 db->db.db_size, db, type)); 620 db->db_state = DB_FILL; 621 } else if (db->db_state == DB_NOFILL) { 622 dbuf_set_data(db, NULL); 623 } else { 624 ASSERT3U(db->db_state, ==, DB_CACHED); 625 } 626 mutex_exit(&db->db_mtx); 627 } 628 629 /* 630 * This is our just-in-time copy function. It makes a copy of 631 * buffers, that have been modified in a previous transaction 632 * group, before we modify them in the current active group. 633 * 634 * This function is used in two places: when we are dirtying a 635 * buffer for the first time in a txg, and when we are freeing 636 * a range in a dnode that includes this buffer. 637 * 638 * Note that when we are called from dbuf_free_range() we do 639 * not put a hold on the buffer, we just traverse the active 640 * dbuf list for the dnode. 
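 * Nothing needs to be done unless the most recent dirty record still
 * references the current buffer; if it already has its own copy (or
 * there is no dirty record at all), we return immediately.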
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DB_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DB_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		dr->dt.dl.dr_data = arc_buf_alloc(
		    db->db_dnode->dn_objset->os_spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DB_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	/* free this block */
	if (!BP_IS_HOLE(&dr->dt.dl.dr_overridden_by)) {
		/* XXX can get silent EIO here */
		(void) dsl_free(NULL,
		    spa_get_dsl(db->db_dnode->dn_objset->os_spa),
		    txg, &dr->dt.dl.dr_overridden_by, NULL, NULL, ARC_WAIT);
	}
	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
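 * (A level-1 indirect covers 1 << (dn_indblkshift - SPA_BLKPTRSHIFT)
 * level-0 blocks, which is how first_l1 and last_l1 are derived from
 * the level-0 range below.)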
720 */ 721 void 722 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx) 723 { 724 dmu_buf_impl_t *db, *db_next; 725 uint64_t txg = tx->tx_txg; 726 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 727 uint64_t first_l1 = start >> epbs; 728 uint64_t last_l1 = end >> epbs; 729 730 if (end > dn->dn_maxblkid) { 731 end = dn->dn_maxblkid; 732 last_l1 = end >> epbs; 733 } 734 dprintf_dnode(dn, "start=%llu end=%llu\n", start, end); 735 mutex_enter(&dn->dn_dbufs_mtx); 736 for (db = list_head(&dn->dn_dbufs); db; db = db_next) { 737 db_next = list_next(&dn->dn_dbufs, db); 738 ASSERT(db->db_blkid != DB_BONUS_BLKID); 739 740 if (db->db_level == 1 && 741 db->db_blkid >= first_l1 && db->db_blkid <= last_l1) { 742 mutex_enter(&db->db_mtx); 743 if (db->db_last_dirty && 744 db->db_last_dirty->dr_txg < txg) { 745 dbuf_add_ref(db, FTAG); 746 mutex_exit(&db->db_mtx); 747 dbuf_will_dirty(db, tx); 748 dbuf_rele(db, FTAG); 749 } else { 750 mutex_exit(&db->db_mtx); 751 } 752 } 753 754 if (db->db_level != 0) 755 continue; 756 dprintf_dbuf(db, "found buf %s\n", ""); 757 if (db->db_blkid < start || db->db_blkid > end) 758 continue; 759 760 /* found a level 0 buffer in the range */ 761 if (dbuf_undirty(db, tx)) 762 continue; 763 764 mutex_enter(&db->db_mtx); 765 if (db->db_state == DB_UNCACHED || 766 db->db_state == DB_NOFILL || 767 db->db_state == DB_EVICTING) { 768 ASSERT(db->db.db_data == NULL); 769 mutex_exit(&db->db_mtx); 770 continue; 771 } 772 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 773 /* will be handled in dbuf_read_done or dbuf_rele */ 774 db->db_freed_in_flight = TRUE; 775 mutex_exit(&db->db_mtx); 776 continue; 777 } 778 if (refcount_count(&db->db_holds) == 0) { 779 ASSERT(db->db_buf); 780 dbuf_clear(db); 781 continue; 782 } 783 /* The dbuf is referenced */ 784 785 if (db->db_last_dirty != NULL) { 786 dbuf_dirty_record_t *dr = db->db_last_dirty; 787 788 if (dr->dr_txg == txg) { 789 /* 790 * This buffer is "in-use", re-adjust the file 791 * size to reflect that this buffer may 792 * contain new data when we sync. 793 */ 794 if (db->db_blkid > dn->dn_maxblkid) 795 dn->dn_maxblkid = db->db_blkid; 796 dbuf_unoverride(dr); 797 } else { 798 /* 799 * This dbuf is not dirty in the open context. 800 * Either uncache it (if its not referenced in 801 * the open context) or reset its contents to 802 * empty. 803 */ 804 dbuf_fix_old_data(db, txg); 805 } 806 } 807 /* clear the contents if its cached */ 808 if (db->db_state == DB_CACHED) { 809 ASSERT(db->db.db_data != NULL); 810 arc_release(db->db_buf, db); 811 bzero(db->db.db_data, db->db.db_size); 812 arc_buf_freeze(db->db_buf); 813 } 814 815 mutex_exit(&db->db_mtx); 816 } 817 mutex_exit(&dn->dn_dbufs_mtx); 818 } 819 820 static int 821 dbuf_block_freeable(dmu_buf_impl_t *db) 822 { 823 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset; 824 uint64_t birth_txg = 0; 825 826 /* 827 * We don't need any locking to protect db_blkptr: 828 * If it's syncing, then db_last_dirty will be set 829 * so we'll ignore db_blkptr. 
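	 * A birth_txg of zero means neither a dirty record nor an
	 * allocated on-disk block exists, so there is nothing that
	 * could be freed.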
830 */ 831 ASSERT(MUTEX_HELD(&db->db_mtx)); 832 if (db->db_last_dirty) 833 birth_txg = db->db_last_dirty->dr_txg; 834 else if (db->db_blkptr) 835 birth_txg = db->db_blkptr->blk_birth; 836 837 /* If we don't exist or are in a snapshot, we can't be freed */ 838 if (birth_txg) 839 return (ds == NULL || 840 dsl_dataset_block_freeable(ds, birth_txg)); 841 else 842 return (FALSE); 843 } 844 845 void 846 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 847 { 848 arc_buf_t *buf, *obuf; 849 int osize = db->db.db_size; 850 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 851 852 ASSERT(db->db_blkid != DB_BONUS_BLKID); 853 854 /* XXX does *this* func really need the lock? */ 855 ASSERT(RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)); 856 857 /* 858 * This call to dbuf_will_dirty() with the dn_struct_rwlock held 859 * is OK, because there can be no other references to the db 860 * when we are changing its size, so no concurrent DB_FILL can 861 * be happening. 862 */ 863 /* 864 * XXX we should be doing a dbuf_read, checking the return 865 * value and returning that up to our callers 866 */ 867 dbuf_will_dirty(db, tx); 868 869 /* create the data buffer for the new block */ 870 buf = arc_buf_alloc(db->db_dnode->dn_objset->os_spa, size, db, type); 871 872 /* copy old block data to the new block */ 873 obuf = db->db_buf; 874 bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 875 /* zero the remainder */ 876 if (size > osize) 877 bzero((uint8_t *)buf->b_data + osize, size - osize); 878 879 mutex_enter(&db->db_mtx); 880 dbuf_set_data(db, buf); 881 VERIFY(arc_buf_remove_ref(obuf, db) == 1); 882 db->db.db_size = size; 883 884 if (db->db_level == 0) { 885 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 886 db->db_last_dirty->dt.dl.dr_data = buf; 887 } 888 mutex_exit(&db->db_mtx); 889 890 dnode_willuse_space(db->db_dnode, size-osize, tx); 891 } 892 893 dbuf_dirty_record_t * 894 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 895 { 896 dnode_t *dn = db->db_dnode; 897 objset_impl_t *os = dn->dn_objset; 898 dbuf_dirty_record_t **drp, *dr; 899 int drop_struct_lock = FALSE; 900 boolean_t do_free_accounting = B_FALSE; 901 int txgoff = tx->tx_txg & TXG_MASK; 902 903 ASSERT(tx->tx_txg != 0); 904 ASSERT(!refcount_is_zero(&db->db_holds)); 905 DMU_TX_DIRTY_BUF(tx, db); 906 907 /* 908 * Shouldn't dirty a regular buffer in syncing context. Private 909 * objects may be dirtied in syncing context, but only if they 910 * were already pre-dirtied in open context. 911 */ 912 ASSERT(!dmu_tx_is_syncing(tx) || 913 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 914 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 915 dn->dn_objset->os_dsl_dataset == NULL); 916 /* 917 * We make this assert for private objects as well, but after we 918 * check if we're already dirty. They are allowed to re-dirty 919 * in syncing context. 920 */ 921 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 922 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 923 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 924 925 mutex_enter(&db->db_mtx); 926 /* 927 * XXX make this true for indirects too? The problem is that 928 * transactions created with dmu_tx_create_assigned() from 929 * syncing context don't bother holding ahead. 930 */ 931 ASSERT(db->db_level != 0 || 932 db->db_state == DB_CACHED || db->db_state == DB_FILL || 933 db->db_state == DB_NOFILL); 934 935 mutex_enter(&dn->dn_mtx); 936 /* 937 * Don't set dirtyctx to SYNC if we're just modifying this as we 938 * initialize the objset. 
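	 * While the objset is being created its os_rootbp is still a
	 * hole, which is what the BP_IS_HOLE() check below detects.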
939 */ 940 if (dn->dn_dirtyctx == DN_UNDIRTIED && 941 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 942 dn->dn_dirtyctx = 943 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); 944 ASSERT(dn->dn_dirtyctx_firstset == NULL); 945 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 946 } 947 mutex_exit(&dn->dn_mtx); 948 949 /* 950 * If this buffer is already dirty, we're done. 951 */ 952 drp = &db->db_last_dirty; 953 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 954 db->db.db_object == DMU_META_DNODE_OBJECT); 955 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 956 drp = &dr->dr_next; 957 if (dr && dr->dr_txg == tx->tx_txg) { 958 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { 959 /* 960 * If this buffer has already been written out, 961 * we now need to reset its state. 962 */ 963 dbuf_unoverride(dr); 964 if (db->db.db_object != DMU_META_DNODE_OBJECT) 965 arc_buf_thaw(db->db_buf); 966 } 967 mutex_exit(&db->db_mtx); 968 return (dr); 969 } 970 971 /* 972 * Only valid if not already dirty. 973 */ 974 ASSERT(dn->dn_object == 0 || 975 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 976 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 977 978 ASSERT3U(dn->dn_nlevels, >, db->db_level); 979 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 980 dn->dn_phys->dn_nlevels > db->db_level || 981 dn->dn_next_nlevels[txgoff] > db->db_level || 982 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 983 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 984 985 /* 986 * We should only be dirtying in syncing context if it's the 987 * mos or we're initializing the os or it's a special object. 988 * However, we are allowed to dirty in syncing context provided 989 * we already dirtied it in open context. Hence we must make 990 * this assertion only if we're not already dirty. 991 */ 992 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 993 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 994 ASSERT(db->db.db_size != 0); 995 996 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 997 998 if (db->db_blkid != DB_BONUS_BLKID) { 999 /* 1000 * Update the accounting. 1001 * Note: we delay "free accounting" until after we drop 1002 * the db_mtx. This keeps us from grabbing other locks 1003 * (and possibly deadlocking) in bp_get_dasize() while 1004 * also holding the db_mtx. 1005 */ 1006 dnode_willuse_space(dn, db->db.db_size, tx); 1007 do_free_accounting = dbuf_block_freeable(db); 1008 } 1009 1010 /* 1011 * If this buffer is dirty in an old transaction group we need 1012 * to make a copy of it so that the changes we make in this 1013 * transaction group won't leak out when we sync the older txg. 1014 */ 1015 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1016 if (db->db_level == 0) { 1017 void *data_old = db->db_buf; 1018 1019 if (db->db_state != DB_NOFILL) { 1020 if (db->db_blkid == DB_BONUS_BLKID) { 1021 dbuf_fix_old_data(db, tx->tx_txg); 1022 data_old = db->db.db_data; 1023 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1024 /* 1025 * Release the data buffer from the cache so 1026 * that we can modify it without impacting 1027 * possible other users of this cached data 1028 * block. Note that indirect blocks and 1029 * private objects are not released until the 1030 * syncing state (since they are only modified 1031 * then). 
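				 * (arc_release() moves the buf to the
				 * anonymous state, so the changes we make
				 * here cannot be seen through any shared
				 * ARC copy of the block.)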
1032 */ 1033 arc_release(db->db_buf, db); 1034 dbuf_fix_old_data(db, tx->tx_txg); 1035 data_old = db->db_buf; 1036 } 1037 ASSERT(data_old != NULL); 1038 } 1039 dr->dt.dl.dr_data = data_old; 1040 } else { 1041 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1042 list_create(&dr->dt.di.dr_children, 1043 sizeof (dbuf_dirty_record_t), 1044 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1045 } 1046 dr->dr_dbuf = db; 1047 dr->dr_txg = tx->tx_txg; 1048 dr->dr_next = *drp; 1049 *drp = dr; 1050 1051 /* 1052 * We could have been freed_in_flight between the dbuf_noread 1053 * and dbuf_dirty. We win, as though the dbuf_noread() had 1054 * happened after the free. 1055 */ 1056 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID) { 1057 mutex_enter(&dn->dn_mtx); 1058 dnode_clear_range(dn, db->db_blkid, 1, tx); 1059 mutex_exit(&dn->dn_mtx); 1060 db->db_freed_in_flight = FALSE; 1061 } 1062 1063 /* 1064 * This buffer is now part of this txg 1065 */ 1066 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1067 db->db_dirtycnt += 1; 1068 ASSERT3U(db->db_dirtycnt, <=, 3); 1069 1070 mutex_exit(&db->db_mtx); 1071 1072 if (db->db_blkid == DB_BONUS_BLKID) { 1073 mutex_enter(&dn->dn_mtx); 1074 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1075 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1076 mutex_exit(&dn->dn_mtx); 1077 dnode_setdirty(dn, tx); 1078 return (dr); 1079 } else if (do_free_accounting) { 1080 blkptr_t *bp = db->db_blkptr; 1081 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ? 1082 bp_get_dasize(os->os_spa, bp) : db->db.db_size; 1083 /* 1084 * This is only a guess -- if the dbuf is dirty 1085 * in a previous txg, we don't know how much 1086 * space it will use on disk yet. We should 1087 * really have the struct_rwlock to access 1088 * db_blkptr, but since this is just a guess, 1089 * it's OK if we get an odd answer. 
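		 * (The value is negated below because dirtying a freeable
		 * block means its existing on-disk copy will be freed when
		 * this txg syncs.)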
1090 */ 1091 dnode_willuse_space(dn, -willfree, tx); 1092 } 1093 1094 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 1095 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1096 drop_struct_lock = TRUE; 1097 } 1098 1099 if (db->db_level == 0) { 1100 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 1101 ASSERT(dn->dn_maxblkid >= db->db_blkid); 1102 } 1103 1104 if (db->db_level+1 < dn->dn_nlevels) { 1105 dmu_buf_impl_t *parent = db->db_parent; 1106 dbuf_dirty_record_t *di; 1107 int parent_held = FALSE; 1108 1109 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1110 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1111 1112 parent = dbuf_hold_level(dn, db->db_level+1, 1113 db->db_blkid >> epbs, FTAG); 1114 parent_held = TRUE; 1115 } 1116 if (drop_struct_lock) 1117 rw_exit(&dn->dn_struct_rwlock); 1118 ASSERT3U(db->db_level+1, ==, parent->db_level); 1119 di = dbuf_dirty(parent, tx); 1120 if (parent_held) 1121 dbuf_rele(parent, FTAG); 1122 1123 mutex_enter(&db->db_mtx); 1124 /* possible race with dbuf_undirty() */ 1125 if (db->db_last_dirty == dr || 1126 dn->dn_object == DMU_META_DNODE_OBJECT) { 1127 mutex_enter(&di->dt.di.dr_mtx); 1128 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1129 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1130 list_insert_tail(&di->dt.di.dr_children, dr); 1131 mutex_exit(&di->dt.di.dr_mtx); 1132 dr->dr_parent = di; 1133 } 1134 mutex_exit(&db->db_mtx); 1135 } else { 1136 ASSERT(db->db_level+1 == dn->dn_nlevels); 1137 ASSERT(db->db_blkid < dn->dn_nblkptr); 1138 ASSERT(db->db_parent == NULL || 1139 db->db_parent == db->db_dnode->dn_dbuf); 1140 mutex_enter(&dn->dn_mtx); 1141 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1142 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1143 mutex_exit(&dn->dn_mtx); 1144 if (drop_struct_lock) 1145 rw_exit(&dn->dn_struct_rwlock); 1146 } 1147 1148 dnode_setdirty(dn, tx); 1149 return (dr); 1150 } 1151 1152 static int 1153 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1154 { 1155 dnode_t *dn = db->db_dnode; 1156 uint64_t txg = tx->tx_txg; 1157 dbuf_dirty_record_t *dr, **drp; 1158 1159 ASSERT(txg != 0); 1160 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1161 1162 mutex_enter(&db->db_mtx); 1163 1164 /* 1165 * If this buffer is not dirty, we're done. 1166 */ 1167 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1168 if (dr->dr_txg <= txg) 1169 break; 1170 if (dr == NULL || dr->dr_txg < txg) { 1171 mutex_exit(&db->db_mtx); 1172 return (0); 1173 } 1174 ASSERT(dr->dr_txg == txg); 1175 1176 /* 1177 * If this buffer is currently held, we cannot undirty 1178 * it, since one of the current holders may be in the 1179 * middle of an update. Note that users of dbuf_undirty() 1180 * should not place a hold on the dbuf before the call. 
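	 * (Every dirty txg holds its own reference -- dbuf_dirty() calls
	 * dbuf_add_ref() with the txg as the tag -- which is why the check
	 * below compares db_holds against db_dirtycnt.)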
1181 */ 1182 if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 1183 mutex_exit(&db->db_mtx); 1184 /* Make sure we don't toss this buffer at sync phase */ 1185 mutex_enter(&dn->dn_mtx); 1186 dnode_clear_range(dn, db->db_blkid, 1, tx); 1187 mutex_exit(&dn->dn_mtx); 1188 return (0); 1189 } 1190 1191 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1192 1193 ASSERT(db->db.db_size != 0); 1194 1195 /* XXX would be nice to fix up dn_towrite_space[] */ 1196 1197 *drp = dr->dr_next; 1198 1199 if (dr->dr_parent) { 1200 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1201 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1202 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1203 } else if (db->db_level+1 == dn->dn_nlevels) { 1204 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1205 mutex_enter(&dn->dn_mtx); 1206 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1207 mutex_exit(&dn->dn_mtx); 1208 } 1209 1210 if (db->db_level == 0) { 1211 if (db->db_state != DB_NOFILL) { 1212 dbuf_unoverride(dr); 1213 1214 ASSERT(db->db_buf != NULL); 1215 ASSERT(dr->dt.dl.dr_data != NULL); 1216 if (dr->dt.dl.dr_data != db->db_buf) 1217 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, 1218 db) == 1); 1219 } 1220 } else { 1221 ASSERT(db->db_buf != NULL); 1222 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 1223 mutex_destroy(&dr->dt.di.dr_mtx); 1224 list_destroy(&dr->dt.di.dr_children); 1225 } 1226 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1227 1228 ASSERT(db->db_dirtycnt > 0); 1229 db->db_dirtycnt -= 1; 1230 1231 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1232 arc_buf_t *buf = db->db_buf; 1233 1234 ASSERT(arc_released(buf)); 1235 dbuf_set_data(db, NULL); 1236 VERIFY(arc_buf_remove_ref(buf, db) == 1); 1237 dbuf_evict(db); 1238 return (1); 1239 } 1240 1241 mutex_exit(&db->db_mtx); 1242 return (0); 1243 } 1244 1245 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty 1246 void 1247 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1248 { 1249 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1250 1251 ASSERT(tx->tx_txg != 0); 1252 ASSERT(!refcount_is_zero(&db->db_holds)); 1253 1254 if (RW_WRITE_HELD(&db->db_dnode->dn_struct_rwlock)) 1255 rf |= DB_RF_HAVESTRUCT; 1256 (void) dbuf_read(db, NULL, rf); 1257 (void) dbuf_dirty(db, tx); 1258 } 1259 1260 void 1261 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1262 { 1263 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1264 1265 db->db_state = DB_NOFILL; 1266 1267 dmu_buf_will_fill(db_fake, tx); 1268 } 1269 1270 void 1271 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1272 { 1273 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1274 1275 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1276 ASSERT(tx->tx_txg != 0); 1277 ASSERT(db->db_level == 0); 1278 ASSERT(!refcount_is_zero(&db->db_holds)); 1279 1280 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1281 dmu_tx_private_ok(tx)); 1282 1283 dbuf_noread(db); 1284 (void) dbuf_dirty(db, tx); 1285 } 1286 1287 #pragma weak dmu_buf_fill_done = dbuf_fill_done 1288 /* ARGSUSED */ 1289 void 1290 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1291 { 1292 mutex_enter(&db->db_mtx); 1293 DBUF_VERIFY(db); 1294 1295 if (db->db_state == DB_FILL) { 1296 if (db->db_level == 0 && db->db_freed_in_flight) { 1297 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1298 /* we were freed while filling */ 1299 /* XXX dbuf_undirty? 
*/ 1300 bzero(db->db.db_data, db->db.db_size); 1301 db->db_freed_in_flight = FALSE; 1302 } 1303 db->db_state = DB_CACHED; 1304 cv_broadcast(&db->db_changed); 1305 } 1306 mutex_exit(&db->db_mtx); 1307 } 1308 1309 /* 1310 * Directly assign a provided arc buf to a given dbuf if it's not referenced 1311 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 1312 */ 1313 void 1314 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 1315 { 1316 ASSERT(!refcount_is_zero(&db->db_holds)); 1317 ASSERT(db->db_dnode->dn_object != DMU_META_DNODE_OBJECT); 1318 ASSERT(db->db_blkid != DB_BONUS_BLKID); 1319 ASSERT(db->db_level == 0); 1320 ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA); 1321 ASSERT(buf != NULL); 1322 ASSERT(arc_buf_size(buf) == db->db.db_size); 1323 ASSERT(tx->tx_txg != 0); 1324 1325 arc_return_buf(buf, db); 1326 ASSERT(arc_released(buf)); 1327 1328 mutex_enter(&db->db_mtx); 1329 1330 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1331 cv_wait(&db->db_changed, &db->db_mtx); 1332 1333 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 1334 1335 if (db->db_state == DB_CACHED && 1336 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 1337 mutex_exit(&db->db_mtx); 1338 (void) dbuf_dirty(db, tx); 1339 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1340 VERIFY(arc_buf_remove_ref(buf, db) == 1); 1341 return; 1342 } 1343 1344 if (db->db_state == DB_CACHED) { 1345 dbuf_dirty_record_t *dr = db->db_last_dirty; 1346 1347 ASSERT(db->db_buf != NULL); 1348 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 1349 ASSERT(dr->dt.dl.dr_data == db->db_buf); 1350 if (!arc_released(db->db_buf)) { 1351 ASSERT(dr->dt.dl.dr_override_state == 1352 DR_OVERRIDDEN); 1353 arc_release(db->db_buf, db); 1354 } 1355 dr->dt.dl.dr_data = buf; 1356 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1); 1357 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 1358 arc_release(db->db_buf, db); 1359 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 1); 1360 } 1361 db->db_buf = NULL; 1362 } 1363 ASSERT(db->db_buf == NULL); 1364 dbuf_set_data(db, buf); 1365 db->db_state = DB_FILL; 1366 mutex_exit(&db->db_mtx); 1367 (void) dbuf_dirty(db, tx); 1368 dbuf_fill_done(db, tx); 1369 } 1370 1371 /* 1372 * "Clear" the contents of this dbuf. This will mark the dbuf 1373 * EVICTING and clear *most* of its references. Unfortunetely, 1374 * when we are not holding the dn_dbufs_mtx, we can't clear the 1375 * entry in the dn_dbufs list. We have to wait until dbuf_destroy() 1376 * in this case. 
For callers from the DMU we will usually see: 1377 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy() 1378 * For the arc callback, we will usually see: 1379 * dbuf_do_evict()->dbuf_clear();dbuf_destroy() 1380 * Sometimes, though, we will get a mix of these two: 1381 * DMU: dbuf_clear()->arc_buf_evict() 1382 * ARC: dbuf_do_evict()->dbuf_destroy() 1383 */ 1384 void 1385 dbuf_clear(dmu_buf_impl_t *db) 1386 { 1387 dnode_t *dn = db->db_dnode; 1388 dmu_buf_impl_t *parent = db->db_parent; 1389 dmu_buf_impl_t *dndb = dn->dn_dbuf; 1390 int dbuf_gone = FALSE; 1391 1392 ASSERT(MUTEX_HELD(&db->db_mtx)); 1393 ASSERT(refcount_is_zero(&db->db_holds)); 1394 1395 dbuf_evict_user(db); 1396 1397 if (db->db_state == DB_CACHED) { 1398 ASSERT(db->db.db_data != NULL); 1399 if (db->db_blkid == DB_BONUS_BLKID) { 1400 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 1401 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 1402 } 1403 db->db.db_data = NULL; 1404 db->db_state = DB_UNCACHED; 1405 } 1406 1407 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 1408 ASSERT(db->db_data_pending == NULL); 1409 1410 db->db_state = DB_EVICTING; 1411 db->db_blkptr = NULL; 1412 1413 if (db->db_blkid != DB_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) { 1414 list_remove(&dn->dn_dbufs, db); 1415 dnode_rele(dn, db); 1416 db->db_dnode = NULL; 1417 } 1418 1419 if (db->db_buf) 1420 dbuf_gone = arc_buf_evict(db->db_buf); 1421 1422 if (!dbuf_gone) 1423 mutex_exit(&db->db_mtx); 1424 1425 /* 1426 * If this dbuf is referened from an indirect dbuf, 1427 * decrement the ref count on the indirect dbuf. 1428 */ 1429 if (parent && parent != dndb) 1430 dbuf_rele(parent, db); 1431 } 1432 1433 static int 1434 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 1435 dmu_buf_impl_t **parentp, blkptr_t **bpp) 1436 { 1437 int nlevels, epbs; 1438 1439 *parentp = NULL; 1440 *bpp = NULL; 1441 1442 ASSERT(blkid != DB_BONUS_BLKID); 1443 1444 if (dn->dn_phys->dn_nlevels == 0) 1445 nlevels = 1; 1446 else 1447 nlevels = dn->dn_phys->dn_nlevels; 1448 1449 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1450 1451 ASSERT3U(level * epbs, <, 64); 1452 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1453 if (level >= nlevels || 1454 (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 1455 /* the buffer has no parent yet */ 1456 return (ENOENT); 1457 } else if (level < nlevels-1) { 1458 /* this block is referenced from an indirect block */ 1459 int err = dbuf_hold_impl(dn, level+1, 1460 blkid >> epbs, fail_sparse, NULL, parentp); 1461 if (err) 1462 return (err); 1463 err = dbuf_read(*parentp, NULL, 1464 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 1465 if (err) { 1466 dbuf_rele(*parentp, NULL); 1467 *parentp = NULL; 1468 return (err); 1469 } 1470 *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 1471 (blkid & ((1ULL << epbs) - 1)); 1472 return (0); 1473 } else { 1474 /* the block is referenced from the dnode */ 1475 ASSERT3U(level, ==, nlevels-1); 1476 ASSERT(dn->dn_phys->dn_nblkptr == 0 || 1477 blkid < dn->dn_phys->dn_nblkptr); 1478 if (dn->dn_dbuf) { 1479 dbuf_add_ref(dn->dn_dbuf, NULL); 1480 *parentp = dn->dn_dbuf; 1481 } 1482 *bpp = &dn->dn_phys->dn_blkptr[blkid]; 1483 return (0); 1484 } 1485 } 1486 1487 static dmu_buf_impl_t * 1488 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 1489 dmu_buf_impl_t *parent, blkptr_t *blkptr) 1490 { 1491 objset_impl_t *os = dn->dn_objset; 1492 dmu_buf_impl_t *db, *odb; 1493 1494 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1495 ASSERT(dn->dn_type != DMU_OT_NONE); 1496 
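	/* A KM_SLEEP allocation cannot fail, so dbuf_create() never returns NULL. */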
1497 db = kmem_cache_alloc(dbuf_cache, KM_SLEEP); 1498 1499 db->db_objset = os; 1500 db->db.db_object = dn->dn_object; 1501 db->db_level = level; 1502 db->db_blkid = blkid; 1503 db->db_last_dirty = NULL; 1504 db->db_dirtycnt = 0; 1505 db->db_dnode = dn; 1506 db->db_parent = parent; 1507 db->db_blkptr = blkptr; 1508 1509 db->db_user_ptr = NULL; 1510 db->db_user_data_ptr_ptr = NULL; 1511 db->db_evict_func = NULL; 1512 db->db_immediate_evict = 0; 1513 db->db_freed_in_flight = 0; 1514 1515 if (blkid == DB_BONUS_BLKID) { 1516 ASSERT3P(parent, ==, dn->dn_dbuf); 1517 db->db.db_size = DN_MAX_BONUSLEN - 1518 (dn->dn_nblkptr-1) * sizeof (blkptr_t); 1519 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 1520 db->db.db_offset = DB_BONUS_BLKID; 1521 db->db_state = DB_UNCACHED; 1522 /* the bonus dbuf is not placed in the hash table */ 1523 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1524 return (db); 1525 } else { 1526 int blocksize = 1527 db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz; 1528 db->db.db_size = blocksize; 1529 db->db.db_offset = db->db_blkid * blocksize; 1530 } 1531 1532 /* 1533 * Hold the dn_dbufs_mtx while we get the new dbuf 1534 * in the hash table *and* added to the dbufs list. 1535 * This prevents a possible deadlock with someone 1536 * trying to look up this dbuf before its added to the 1537 * dn_dbufs list. 1538 */ 1539 mutex_enter(&dn->dn_dbufs_mtx); 1540 db->db_state = DB_EVICTING; 1541 if ((odb = dbuf_hash_insert(db)) != NULL) { 1542 /* someone else inserted it first */ 1543 kmem_cache_free(dbuf_cache, db); 1544 mutex_exit(&dn->dn_dbufs_mtx); 1545 return (odb); 1546 } 1547 list_insert_head(&dn->dn_dbufs, db); 1548 db->db_state = DB_UNCACHED; 1549 mutex_exit(&dn->dn_dbufs_mtx); 1550 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1551 1552 if (parent && parent != dn->dn_dbuf) 1553 dbuf_add_ref(parent, db); 1554 1555 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1556 refcount_count(&dn->dn_holds) > 0); 1557 (void) refcount_add(&dn->dn_holds, db); 1558 1559 dprintf_dbuf(db, "db=%p\n", db); 1560 1561 return (db); 1562 } 1563 1564 static int 1565 dbuf_do_evict(void *private) 1566 { 1567 arc_buf_t *buf = private; 1568 dmu_buf_impl_t *db = buf->b_private; 1569 1570 if (!MUTEX_HELD(&db->db_mtx)) 1571 mutex_enter(&db->db_mtx); 1572 1573 ASSERT(refcount_is_zero(&db->db_holds)); 1574 1575 if (db->db_state != DB_EVICTING) { 1576 ASSERT(db->db_state == DB_CACHED); 1577 DBUF_VERIFY(db); 1578 db->db_buf = NULL; 1579 dbuf_evict(db); 1580 } else { 1581 mutex_exit(&db->db_mtx); 1582 dbuf_destroy(db); 1583 } 1584 return (0); 1585 } 1586 1587 static void 1588 dbuf_destroy(dmu_buf_impl_t *db) 1589 { 1590 ASSERT(refcount_is_zero(&db->db_holds)); 1591 1592 if (db->db_blkid != DB_BONUS_BLKID) { 1593 /* 1594 * If this dbuf is still on the dn_dbufs list, 1595 * remove it from that list. 
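	 * (If dbuf_clear() was able to take dn_dbufs_mtx it has already
	 * done the removal and cleared db_dnode, so a non-NULL db_dnode
	 * below means the removal is still outstanding.)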
1596 */ 1597 if (db->db_dnode) { 1598 dnode_t *dn = db->db_dnode; 1599 1600 mutex_enter(&dn->dn_dbufs_mtx); 1601 list_remove(&dn->dn_dbufs, db); 1602 mutex_exit(&dn->dn_dbufs_mtx); 1603 1604 dnode_rele(dn, db); 1605 db->db_dnode = NULL; 1606 } 1607 dbuf_hash_remove(db); 1608 } 1609 db->db_parent = NULL; 1610 db->db_buf = NULL; 1611 1612 ASSERT(!list_link_active(&db->db_link)); 1613 ASSERT(db->db.db_data == NULL); 1614 ASSERT(db->db_hash_next == NULL); 1615 ASSERT(db->db_blkptr == NULL); 1616 ASSERT(db->db_data_pending == NULL); 1617 1618 kmem_cache_free(dbuf_cache, db); 1619 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1620 } 1621 1622 void 1623 dbuf_prefetch(dnode_t *dn, uint64_t blkid) 1624 { 1625 dmu_buf_impl_t *db = NULL; 1626 blkptr_t *bp = NULL; 1627 1628 ASSERT(blkid != DB_BONUS_BLKID); 1629 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1630 1631 if (dnode_block_freed(dn, blkid)) 1632 return; 1633 1634 /* dbuf_find() returns with db_mtx held */ 1635 if (db = dbuf_find(dn, 0, blkid)) { 1636 if (refcount_count(&db->db_holds) > 0) { 1637 /* 1638 * This dbuf is active. We assume that it is 1639 * already CACHED, or else about to be either 1640 * read or filled. 1641 */ 1642 mutex_exit(&db->db_mtx); 1643 return; 1644 } 1645 mutex_exit(&db->db_mtx); 1646 db = NULL; 1647 } 1648 1649 if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) { 1650 if (bp && !BP_IS_HOLE(bp)) { 1651 arc_buf_t *pbuf; 1652 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH; 1653 zbookmark_t zb; 1654 zb.zb_objset = dn->dn_objset->os_dsl_dataset ? 1655 dn->dn_objset->os_dsl_dataset->ds_object : 0; 1656 zb.zb_object = dn->dn_object; 1657 zb.zb_level = 0; 1658 zb.zb_blkid = blkid; 1659 1660 if (db) 1661 pbuf = db->db_buf; 1662 else 1663 pbuf = dn->dn_objset->os_phys_buf; 1664 1665 (void) arc_read(NULL, dn->dn_objset->os_spa, 1666 bp, pbuf, NULL, NULL, ZIO_PRIORITY_ASYNC_READ, 1667 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 1668 &aflags, &zb); 1669 } 1670 if (db) 1671 dbuf_rele(db, NULL); 1672 } 1673 } 1674 1675 /* 1676 * Returns with db_holds incremented, and db_mtx not held. 1677 * Note: dn_struct_rwlock must be held. 
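 * If fail_sparse is set, ENOENT is returned when the block has no backing
 * storage (i.e. it is a hole).  The hold returned here must eventually be
 * dropped with dbuf_rele().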
1678 */ 1679 int 1680 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, 1681 void *tag, dmu_buf_impl_t **dbp) 1682 { 1683 dmu_buf_impl_t *db, *parent = NULL; 1684 1685 ASSERT(blkid != DB_BONUS_BLKID); 1686 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1687 ASSERT3U(dn->dn_nlevels, >, level); 1688 1689 *dbp = NULL; 1690 top: 1691 /* dbuf_find() returns with db_mtx held */ 1692 db = dbuf_find(dn, level, blkid); 1693 1694 if (db == NULL) { 1695 blkptr_t *bp = NULL; 1696 int err; 1697 1698 ASSERT3P(parent, ==, NULL); 1699 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 1700 if (fail_sparse) { 1701 if (err == 0 && bp && BP_IS_HOLE(bp)) 1702 err = ENOENT; 1703 if (err) { 1704 if (parent) 1705 dbuf_rele(parent, NULL); 1706 return (err); 1707 } 1708 } 1709 if (err && err != ENOENT) 1710 return (err); 1711 db = dbuf_create(dn, level, blkid, parent, bp); 1712 } 1713 1714 if (db->db_buf && refcount_is_zero(&db->db_holds)) { 1715 arc_buf_add_ref(db->db_buf, db); 1716 if (db->db_buf->b_data == NULL) { 1717 dbuf_clear(db); 1718 if (parent) { 1719 dbuf_rele(parent, NULL); 1720 parent = NULL; 1721 } 1722 goto top; 1723 } 1724 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 1725 } 1726 1727 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 1728 1729 /* 1730 * If this buffer is currently syncing out, and we are are 1731 * still referencing it from db_data, we need to make a copy 1732 * of it in case we decide we want to dirty it again in this txg. 1733 */ 1734 if (db->db_level == 0 && db->db_blkid != DB_BONUS_BLKID && 1735 dn->dn_object != DMU_META_DNODE_OBJECT && 1736 db->db_state == DB_CACHED && db->db_data_pending) { 1737 dbuf_dirty_record_t *dr = db->db_data_pending; 1738 1739 if (dr->dt.dl.dr_data == db->db_buf) { 1740 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1741 1742 dbuf_set_data(db, 1743 arc_buf_alloc(db->db_dnode->dn_objset->os_spa, 1744 db->db.db_size, db, type)); 1745 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 1746 db->db.db_size); 1747 } 1748 } 1749 1750 (void) refcount_add(&db->db_holds, tag); 1751 dbuf_update_data(db); 1752 DBUF_VERIFY(db); 1753 mutex_exit(&db->db_mtx); 1754 1755 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 1756 if (parent) 1757 dbuf_rele(parent, NULL); 1758 1759 ASSERT3P(db->db_dnode, ==, dn); 1760 ASSERT3U(db->db_blkid, ==, blkid); 1761 ASSERT3U(db->db_level, ==, level); 1762 *dbp = db; 1763 1764 return (0); 1765 } 1766 1767 dmu_buf_impl_t * 1768 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 1769 { 1770 dmu_buf_impl_t *db; 1771 int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db); 1772 return (err ? NULL : db); 1773 } 1774 1775 dmu_buf_impl_t * 1776 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 1777 { 1778 dmu_buf_impl_t *db; 1779 int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db); 1780 return (err ? 
NULL : db); 1781 } 1782 1783 void 1784 dbuf_create_bonus(dnode_t *dn) 1785 { 1786 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1787 1788 ASSERT(dn->dn_bonus == NULL); 1789 dn->dn_bonus = dbuf_create(dn, 0, DB_BONUS_BLKID, dn->dn_dbuf, NULL); 1790 } 1791 1792 #pragma weak dmu_buf_add_ref = dbuf_add_ref 1793 void 1794 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 1795 { 1796 int64_t holds = refcount_add(&db->db_holds, tag); 1797 ASSERT(holds > 1); 1798 } 1799 1800 #pragma weak dmu_buf_rele = dbuf_rele 1801 void 1802 dbuf_rele(dmu_buf_impl_t *db, void *tag) 1803 { 1804 int64_t holds; 1805 1806 mutex_enter(&db->db_mtx); 1807 DBUF_VERIFY(db); 1808 1809 holds = refcount_remove(&db->db_holds, tag); 1810 ASSERT(holds >= 0); 1811 1812 /* 1813 * We can't freeze indirects if there is a possibility that they 1814 * may be modified in the current syncing context. 1815 */ 1816 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) 1817 arc_buf_freeze(db->db_buf); 1818 1819 if (holds == db->db_dirtycnt && 1820 db->db_level == 0 && db->db_immediate_evict) 1821 dbuf_evict_user(db); 1822 1823 if (holds == 0) { 1824 if (db->db_blkid == DB_BONUS_BLKID) { 1825 mutex_exit(&db->db_mtx); 1826 dnode_rele(db->db_dnode, db); 1827 } else if (db->db_buf == NULL) { 1828 /* 1829 * This is a special case: we never associated this 1830 * dbuf with any data allocated from the ARC. 1831 */ 1832 ASSERT(db->db_state == DB_UNCACHED || 1833 db->db_state == DB_NOFILL); 1834 dbuf_evict(db); 1835 } else if (arc_released(db->db_buf)) { 1836 arc_buf_t *buf = db->db_buf; 1837 /* 1838 * This dbuf has anonymous data associated with it. 1839 */ 1840 dbuf_set_data(db, NULL); 1841 VERIFY(arc_buf_remove_ref(buf, db) == 1); 1842 dbuf_evict(db); 1843 } else { 1844 VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0); 1845 if (!DBUF_IS_CACHEABLE(db)) 1846 dbuf_clear(db); 1847 else 1848 mutex_exit(&db->db_mtx); 1849 } 1850 } else { 1851 mutex_exit(&db->db_mtx); 1852 } 1853 } 1854 1855 #pragma weak dmu_buf_refcount = dbuf_refcount 1856 uint64_t 1857 dbuf_refcount(dmu_buf_impl_t *db) 1858 { 1859 return (refcount_count(&db->db_holds)); 1860 } 1861 1862 void * 1863 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, 1864 dmu_buf_evict_func_t *evict_func) 1865 { 1866 return (dmu_buf_update_user(db_fake, NULL, user_ptr, 1867 user_data_ptr_ptr, evict_func)); 1868 } 1869 1870 void * 1871 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr, 1872 dmu_buf_evict_func_t *evict_func) 1873 { 1874 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1875 1876 db->db_immediate_evict = TRUE; 1877 return (dmu_buf_update_user(db_fake, NULL, user_ptr, 1878 user_data_ptr_ptr, evict_func)); 1879 } 1880 1881 void * 1882 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr, 1883 void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func) 1884 { 1885 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1886 ASSERT(db->db_level == 0); 1887 1888 ASSERT((user_ptr == NULL) == (evict_func == NULL)); 1889 1890 mutex_enter(&db->db_mtx); 1891 1892 if (db->db_user_ptr == old_user_ptr) { 1893 db->db_user_ptr = user_ptr; 1894 db->db_user_data_ptr_ptr = user_data_ptr_ptr; 1895 db->db_evict_func = evict_func; 1896 1897 dbuf_update_data(db); 1898 } else { 1899 old_user_ptr = db->db_user_ptr; 1900 } 1901 1902 mutex_exit(&db->db_mtx); 1903 return (old_user_ptr); 1904 } 1905 1906 void * 1907 dmu_buf_get_user(dmu_buf_t *db_fake) 1908 { 1909 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1910 
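	/*
	 * The caller is required to hold the dbuf; otherwise db_user_ptr
	 * could be cleared by dbuf_evict_user() while we read it.
	 */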
void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(!refcount_is_zero(&db->db_holds));

	return (db->db_user_ptr);
}

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr->blk_birth);

	return (res);
}

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no blkptrs available from the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mis-match).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = db->db_dnode;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	ASSERT(db->db_buf != NULL);

	dbuf_check_blkptr(dn, db);

	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}
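
/*
 * Sync out a dirty record for a level-0 (leaf) dbuf: bonus buffers are
 * copied directly into the dnode phys; buffers already written via an
 * immediate (dmu_sync) write just have their new block pointer
 * installed and the accounting completed; everything else is handed to
 * dbuf_write() to start an asynchronous write.
 */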
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DB_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT3U(db->db_level, ==, 0);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		*drp = dr->dr_next;
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)txg);
		return;
	}

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	/*
	 * If this dbuf has already been written out via an immediate write,
	 * just complete the write by copying over the new block pointer and
	 * updating the accounting via the write-completion functions.
	 */
	if (dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		zio_t zio_fake;

		zio_fake.io_private = &db;
		zio_fake.io_error = 0;
		zio_fake.io_bp = db->db_blkptr;
		zio_fake.io_bp_orig = *db->db_blkptr;
		zio_fake.io_txg = txg;
		zio_fake.io_flags = 0;

		*db->db_blkptr = dr->dt.dl.dr_overridden_by;
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		db->db_data_pending = dr;
		dr->dr_zio = &zio_fake;
		mutex_exit(&db->db_mtx);

		ASSERT(!DVA_EQUAL(BP_IDENTITY(zio_fake.io_bp),
		    BP_IDENTITY(&zio_fake.io_bp_orig)) ||
		    BP_IS_HOLE(zio_fake.io_bp));

		if (BP_IS_OLDER(&zio_fake.io_bp_orig, txg))
			(void) dsl_dataset_block_kill(os->os_dsl_dataset,
			    &zio_fake.io_bp_orig, dn->dn_zio, tx);

		dbuf_write_ready(&zio_fake, db->db_buf, db);
		dbuf_write_done(&zio_fake, db->db_buf, db);

		return;
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT)
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
	else
		zio_nowait(dr->dr_zio);
}
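
/*
 * Sync out every dirty record on the given list.  Indirect dbufs are
 * handled by dbuf_sync_indirect() and leaf dbufs by dbuf_sync_leaf().
 * Records that already have a zio belong to the meta-dnode and are
 * left on the list so the caller can zio_wait() for them later.
 */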
void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}
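
/*
 * Issue the write for a dirty dbuf.  The I/O is chained under the
 * parent's pending zio (or the dnode's zio for a top-level block), and
 * is either a NOFILL zio_write() that bypasses the ARC or a normal
 * arc_write() with the dbuf_write_ready()/dbuf_write_done() callbacks.
 */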
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	writeprops_t wp = { 0 };
	zio_t *zio;

	if (!BP_IS_HOLE(db->db_blkptr) &&
	    (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE)) {
		/*
		 * Private object buffers are released here rather
		 * than in dbuf_dirty() since they are only modified
		 * in the syncing context and we don't want the
		 * overhead of making multiple copies of the data.
		 */
		arc_release(data, db);
	} else if (db->db_state != DB_NOFILL) {
		ASSERT(arc_released(data));
		/* XXX why do we need to thaw here? */
		arc_buf_thaw(data);
	}

	if (parent != dn->dn_dbuf) {
		ASSERT(parent && parent->db_data_pending);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		ASSERT(db->db_level == dn->dn_phys->dn_nlevels-1);
		ASSERT3P(db->db_blkptr, ==,
		    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;

	wp.wp_type = dn->dn_type;
	wp.wp_level = db->db_level;
	wp.wp_copies = os->os_copies;
	wp.wp_dncompress = dn->dn_compress;
	wp.wp_oscompress = os->os_compress;
	wp.wp_dnchecksum = dn->dn_checksum;
	wp.wp_oschecksum = os->os_checksum;

	if (BP_IS_OLDER(db->db_blkptr, txg))
		(void) dsl_dataset_block_kill(
		    os->os_dsl_dataset, db->db_blkptr, zio, tx);

	if (db->db_state == DB_NOFILL) {
		zio_prop_t zp = { 0 };

		write_policy(os->os_spa, &wp, &zp);
		dr->dr_zio = zio_write(zio, os->os_spa,
		    txg, db->db_blkptr, NULL,
		    db->db.db_size, &zp, dbuf_skip_write_ready,
		    dbuf_skip_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED, &zb);
	} else {
		dr->dr_zio = arc_write(zio, os->os_spa, &wp,
		    DBUF_IS_L2CACHEABLE(db), txg, db->db_blkptr,
		    data, dbuf_write_ready, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

/* wrapper function for dbuf_write_ready bypassing ARC */
static void
dbuf_skip_write_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (!BP_IS_GANG(bp))
		zio_skip_write(zio);

	dbuf_write_ready(zio, NULL, zio->io_private);
}

/* wrapper function for dbuf_write_done bypassing ARC */
static void
dbuf_skip_write_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}
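
/*
 * Write "ready" callback: the block contents are now final, so charge
 * the space delta to the dnode, compute the new block pointer's fill
 * count, and update the dataset block accounting (block_kill /
 * block_born) unless this is a rewrite of the same location.
 */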
/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn = db->db_dnode;
	objset_impl_t *os = dn->dn_objset;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t fill = 0;
	int old_size, new_size, i;

	ASSERT(db->db_blkptr == bp);

	dprintf_dbuf_bp(db, bp_orig, "bp_orig: %s", "");

	old_size = bp_get_dasize(os->os_spa, bp_orig);
	new_size = bp_get_dasize(os->os_spa, bp);

	dnode_diduse_space(dn, new_size - old_size);

	if (BP_IS_HOLE(bp)) {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		if (bp_orig->blk_birth == tx->tx_txg)
			(void) dsl_dataset_block_kill(ds, bp_orig, zio, tx);
		ASSERT3U(bp->blk_fill, ==, 0);
		return;
	}

	ASSERT(BP_GET_TYPE(bp) == dn->dn_type);
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			ASSERT3U(BP_GET_LSIZE(ibp), ==,
			    db->db_level == 1 ? dn->dn_datablksz :
			    (1<<dn->dn_phys->dn_indblkshift));
			fill += ibp->blk_fill;
		}
	}

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(DVA_EQUAL(BP_IDENTITY(bp), BP_IDENTITY(bp_orig)));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		if (bp_orig->blk_birth == tx->tx_txg)
			(void) dsl_dataset_block_kill(ds, bp_orig, zio, tx);
		dsl_dataset_block_born(ds, bp, tx);
	}
}

/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT3U(zio->io_error, ==, 0);

	mutex_enter(&db->db_mtx);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DB_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);

		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db) == 1);
			else if (!BP_IS_HOLE(db->db_blkptr))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
			else
				ASSERT(arc_released(db->db_buf));
		}
	} else {
		dnode_t *dn = db->db_dnode;

		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	mutex_exit(&db->db_mtx);

	dprintf_dbuf_bp(db, zio->io_bp, "bp: %s", "");

	dbuf_rele(db, (void *)(uintptr_t)txg);
}