/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}
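/*
 * Illustrative usage sketch (not code in this file): dbuf_find()
 * returns with db_mtx held on success, so a caller that only wants
 * to peek at the dbuf must drop the mutex itself:
 *
 *	dmu_buf_impl_t *db = dbuf_find(dn, 0, blkid);
 *	if (db != NULL) {
 *		...examine db under db_mtx...
 *		mutex_exit(&db->db_mtx);
 *	}
 */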
/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to db in the hash table, then the existing element will be
 * returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}
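/*
 * Illustrative sketch of how a caller is expected to handle the
 * insertion race (this is what dbuf_create() below actually does):
 *
 *	dmu_buf_impl_t *odb;
 *	if ((odb = dbuf_hash_insert(db)) != NULL) {
 *		...someone else inserted first; discard db, use odb...
 *		kmem_cache_free(dbuf_cache, db);
 *	}
 */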
/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx, to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}

static arc_evict_func_t dbuf_do_evict;

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem * sizeof (void *) / 4K (i.e. 2MB/GB with 8-byte
	 * pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}
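/*
 * Worked example of the sizing loop in dbuf_init() above (illustrative
 * numbers only): with 4 GiB of physical memory, hsize doubles from 2^16
 * until hsize * 4096 >= 2^32, giving hsize = 2^20 buckets.  At 8 bytes
 * per bucket pointer the table occupies 8 MiB, matching the advertised
 * 2MB per GB.
 */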
void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !list_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			uint64_t *buf = db->db.db_data;
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa;

		mutex_exit(&db->db_mtx);
		DB_GET_SPA(&spa, db);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
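/*
 * Worked example (illustrative): for a dnode with 128K data blocks,
 * dn_datablkshift == 17, so dbuf_whichblock(dn, 1000000) returns
 * 1000000 >> 17 == 7.
 */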
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf.
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	spa_t *spa;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
		    db->db.db_size, db, type));
		DB_DNODE_EXIT(db);
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	spa = dn->dn_objset->os_spa;
	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	int havepzio = (zio != NULL);
	int prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}
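/*
 * Illustrative sketch of a fallible read (assumes the caller already
 * holds a hold on db; the flag names are the DB_RF_* flags used above):
 *
 *	uint32_t flags = DB_RF_CANFAIL | DB_RF_NOPREFETCH;
 *	int err = dbuf_read(db, NULL, flags);
 *	if (err != 0)
 *		...handle EIO from a failed or NOFILL read...
 */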
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		DB_GET_SPA(&spa, db);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group, before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a buffer
 * for the first time in a txg, and when we are freeing a range in a
 * dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do not put
 * a hold on the buffer, we just traverse the active dbuf list for
 * the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) {
		spa_t *spa;

		DB_GET_SPA(&spa, db);
		zio_free(spa, txg, bp);
	}
	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}
/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t first_l1 = start >> epbs;
	uint64_t last_l1 = end >> epbs;

	if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
		end = dn->dn_maxblkid;
		last_l1 = end >> epbs;
	}
	dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level == 1 &&
		    db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
			mutex_enter(&db->db_mtx);
			if (db->db_last_dirty &&
			    db->db_last_dirty->dr_txg < txg) {
				dbuf_add_ref(db, FTAG);
				mutex_exit(&db->db_mtx);
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			} else {
				mutex_exit(&db->db_mtx);
			}
		}

		if (db->db_level != 0)
			continue;
		dprintf_dbuf(db, "found buf %s\n", "");
		if (db->db_blkid < start || db->db_blkid > end)
			continue;

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}
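/*
 * Worked example of the first_l1/last_l1 computation above
 * (illustrative, assuming 16K indirect blocks): epbs = 14 -
 * SPA_BLKPTRSHIFT (7) = 7, so each level-1 dbuf covers 128 level-0
 * blocks, and a free of blocks 1000-5000 touches level-1 dbufs
 * 7 (1000 >> 7) through 39 (5000 >> 7).
 */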
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty)
		birth_txg = db->db_last_dirty->dr_txg;
	else if (db->db_blkptr)
		birth_txg = db->db_blkptr->blk_birth;

	/*
	 * If we don't exist or are in a snapshot, we can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dbuf_will_dirty(db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os;

	DB_GET_OBJSET(&os, db);
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}
	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dnode_clear_range(dn, db->db_blkid, 1, tx);
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}
	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/* possible race with dbuf_undirty() */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}
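/*
 * Illustrative sketch (not new logic): open-context callers normally
 * reach dbuf_dirty() through dmu_buf_will_dirty() below, which reads
 * the buffer in before dirtying it:
 *
 *	dmu_buf_will_dirty(&db->db, tx);
 *	...modify db->db.db_data...
 */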
/*
 * Return TRUE if this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/*
	 * Note: This code will probably work even if there are concurrent
	 * holders, but it is untested in that scenario, as the ZPL and
	 * ztest have additional locking (the range locks) that prevents
	 * that type of concurrent access.
	 */
	ASSERT3U(refcount_count(&db->db_holds), ==, db->db_dirtycnt);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/* XXX would be nice to fix up dn_towrite_space[] */

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

#pragma weak dmu_buf_will_dirty = dbuf_will_dirty
void
dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}
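/*
 * Illustrative sketch of the fill protocol the two functions above
 * implement (src is a hypothetical caller-supplied buffer):
 *
 *	dmu_buf_will_fill(db_fake, tx);
 *	bcopy(src, db_fake->db_data, db_fake->db_size);
 *	dmu_buf_fill_done(db_fake, tx);
 */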
/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller.  Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dbuf_fill_done(db, tx);
}
/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_buf_evict()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	int dbuf_gone = FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		list_remove(&dn->dn_dbufs, db);
		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold
		 * corresponding to the removed dbuf is no longer discounted
		 * in dnode_move(), so the dnode cannot be moved until after
		 * we release the hold.  The membar_producer() ensures
		 * visibility of the decremented value in dnode_move(), since
		 * DB_DNODE_EXIT doesn't actually release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_buf_evict(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}
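/*
 * Worked example of the indirect indexing above (illustrative,
 * assuming 16K indirect blocks, i.e. epbs == 7): level-0 block 1000
 * lives under the level-1 dbuf at blkid 1000 >> 7 == 7, in slot
 * 1000 & 127 == 104 of that block's blkptr array.
 */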
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	list_insert_head(&dn->dn_dbufs, db);
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

static int
dbuf_do_evict(void *private)
{
	arc_buf_t *buf = private;
	dmu_buf_impl_t *db = buf->b_private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			list_remove(&dn->dn_dbufs, db);
			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(!list_link_active(&db->db_link));
	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}
void
dbuf_prefetch(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db = NULL;
	blkptr_t *bp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	if (dnode_block_freed(dn, blkid))
		return;

	/* dbuf_find() returns with db_mtx held */
	if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
		/*
		 * This dbuf is already in the cache.  We assume that
		 * it is already CACHED, or else about to be either
		 * read or filled.
		 */
		mutex_exit(&db->db_mtx);
		return;
	}

	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
		if (bp && !BP_IS_HOLE(bp)) {
			int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
			    ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
			    dn->dn_object, 0, blkid);

			(void) arc_read(NULL, dn->dn_objset->os_spa,
			    bp, NULL, NULL, priority,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		if (db)
			dbuf_rele(db, NULL);
	}
}

/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp)
{
	dmu_buf_impl_t *db, *parent = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT3U(dn->dn_nlevels, >, level);

	*dbp = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	db = dbuf_find(dn, level, blkid);

	if (db == NULL) {
		blkptr_t *bp = NULL;
		int err;

		ASSERT3P(parent, ==, NULL);
		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
		if (fail_sparse) {
			if (err == 0 && bp && BP_IS_HOLE(bp))
				err = SET_ERROR(ENOENT);
			if (err) {
				if (parent)
					dbuf_rele(parent, NULL);
				return (err);
			}
		}
		if (err && err != ENOENT)
			return (err);
		db = dbuf_create(dn, level, blkid, parent, bp);
	}

	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
		arc_buf_add_ref(db->db_buf, db);
		if (db->db_buf->b_data == NULL) {
			dbuf_clear(db);
			if (parent) {
				dbuf_rele(parent, NULL);
				parent = NULL;
			}
			goto top;
		}
		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
	}

	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are still
	 * referencing it from db_data, we need to make a copy of it in
	 * case we decide we want to dirty it again in this txg.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    db->db_state == DB_CACHED && db->db_data_pending) {
		dbuf_dirty_record_t *dr = db->db_data_pending;

		if (dr->dt.dl.dr_data == db->db_buf) {
			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

			dbuf_set_data(db,
			    arc_buf_alloc(dn->dn_objset->os_spa,
			    db->db.db_size, db, type));
			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
			    db->db.db_size);
		}
	}

	(void) refcount_add(&db->db_holds, tag);
	dbuf_update_data(db);
	DBUF_VERIFY(db);
	mutex_exit(&db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (parent)
		dbuf_rele(parent, NULL);

	ASSERT3P(DB_DNODE(db), ==, dn);
	ASSERT3U(db->db_blkid, ==, blkid);
	ASSERT3U(db->db_level, ==, level);
	*dbp = db;

	return (0);
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}
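/*
 * Illustrative hold/release pairing (FTAG is the usual illumos
 * convention for a function-local hold tag):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		...use db...
 *		dbuf_rele(db, FTAG);
 *	}
 */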

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (SET_ERROR(ENOTSUP));
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	if (blksz > SPA_MAXBLOCKSIZE)
		blksz = SPA_MAXBLOCKSIZE;
	else
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}

void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds = refcount_add(&db->db_holds, tag);
	ASSERT(holds > 1);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode.  (An
 * indirect hold is a hold on one of the dnode's dbufs, including the bonus
 * buffer.)  Without that, the dbuf_rele() could lead to a dnode_rele()
 * followed by the dnode's parent dbuf evicting its dnode handles.
 */
#pragma weak dmu_buf_rele = dbuf_rele
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}
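
/*
 * The hazard described in the comment above dbuf_rele() can be made
 * concrete with a hypothetical (incorrect) caller:
 *
 *	DB_DNODE_ENTER(db);
 *	dn = DB_DNODE(db);
 *	DB_DNODE_EXIT(db);
 *	dbuf_rele(db, tag);	// may drop the last hold on the dnode
 *	(void) dn->dn_object;	// potential use-after-free
 *
 * The safe orderings are to finish using the dnode before the
 * dbuf_rele(), or to take an independent hold (e.g. via dnode_hold())
 * that outlives the release.
 */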

/*
 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			mutex_exit(&db->db_mtx);

			/*
			 * If the dnode moves here, we cannot cross this
			 * barrier until the move completes.
			 */
			DB_DNODE_ENTER(db);
			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
			DB_DNODE_EXIT(db);
			/*
			 * The bonus buffer's dnode hold is no longer
			 * discounted in dnode_move().  The dnode cannot move
			 * until after the dnode_rele().
			 */
			dnode_rele(DB_DNODE(db), db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db));
			dbuf_evict(db);
		} else {
			VERIFY(!arc_buf_remove_ref(db->db_buf, db));

			/*
			 * A dbuf will be eligible for eviction if either the
			 * 'primarycache' property is set or a duplicate
			 * copy of this buffer is already cached in the arc.
			 *
			 * In the case of the 'primarycache' a buffer
			 * is considered for eviction if it matches the
			 * criteria set in the property.
			 *
			 * To decide if our buffer is considered a
			 * duplicate, we must call into the arc to determine
			 * if multiple buffers are referencing the same
			 * block on-disk.  If so, then we simply evict
			 * ourselves.
			 */
			if (!DBUF_IS_CACHEABLE(db) ||
			    arc_buf_eviction_needed(db->db_buf))
				dbuf_clear(db);
			else
				mutex_exit(&db->db_mtx);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}

#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(!refcount_is_zero(&db->db_holds));

	return (db->db_user_ptr);
}

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_blkptr);
}
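
/*
 * Usage sketch for the user-data interface above (hypothetical consumer;
 * my_state_t and my_evict_cb are stand-ins for a consumer's own types):
 * dmu_buf_update_user() acts like a compare-and-swap on db_user_ptr, so
 * a racing consumer can detect that someone else attached state first
 * and adopt it:
 *
 *	my_state_t *ms = kmem_zalloc(sizeof (*ms), KM_SLEEP);
 *	my_state_t *winner = dmu_buf_set_user(db_fake, ms, NULL,
 *	    my_evict_cb);
 *	if (winner != NULL) {			// lost the race
 *		kmem_free(ms, sizeof (*ms));
 *		ms = winner;			// use the existing state
 *	}
 */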

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there was
		 * no available blkptr in the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}
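
/*
 * Worked example for the epbs arithmetic in dbuf_check_blkptr() above
 * (numbers are illustrative): with 16K indirect blocks,
 * dn_indblkshift = 14 and SPA_BLKPTRSHIFT = 7 (128-byte blkptrs), so
 * epbs = 14 - 7 = 7 and each indirect block maps 2^7 = 128 children.
 * A level-0 dbuf with db_blkid = 300 therefore hangs off the level-1
 * parent with blkid = 300 >> 7 = 2, in blkptr slot 300 & 127 = 44 of
 * that parent's db_data.
 */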

static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, the dbuf must be dirty; it may, however, have
	 * been freed after it was dirtied.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT0(db->db_level);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns.  We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}
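
/*
 * The copy-on-sync above can be pictured as follows (sketch of the
 * existing logic, no new behavior): the dirty record is redirected to a
 * private snapshot while the open txg keeps writing db_buf:
 *
 *	before:	dr->dt.dl.dr_data == db->db_buf	   // shared, "in use"
 *	after:	dr->dt.dl.dr_data == new ARC buf   // frozen sync copy
 *		db->db_buf unchanged		   // open-txg writers
 *
 * so modifications made in the open txg cannot leak into the block
 * image being written for the syncing txg.
 */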

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}
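
/*
 * Processing-order sketch (hypothetical caller, simplified from the
 * comment inside dbuf_sync_list() above): meta-dnode dirty records are
 * re-queued with dr_zio already set, so a second pass can issue those
 * IOs only after every child IO has been initiated:
 *
 *	dbuf_sync_list(list, tx);	  // children issued; stops at dr_zio
 *	while ((dr = list_head(list)) != NULL) {
 *		list_remove(list, dr);
 *		zio_nowait(dr->dr_zio);	  // meta-dnode IOs go last
 *	}
 */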

/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (BP_IS_HOLE(bp)) {
		ASSERT(bp->blk_fill == 0);
		DB_DNODE_EXIT(db);
		return;
	}

	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_type) ||
	    (db->db_blkid == DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_bonustype));
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}

/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		objset_t *os;
		dsl_dataset_t *ds;
		dmu_tx_t *tx;

		DB_GET_OBJSET(&os, db);
		ds = os->os_dsl_dataset;
		tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db));
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}
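
/*
 * Worked example for the fill count computed in dbuf_write_ready() above
 * (illustrative numbers): blk_fill counts the allocated dnodes (for
 * DMU_OT_DNODE blocks) or level-0 blocks reachable beneath a block
 * pointer.  For a level-1 indirect block whose slots hold three non-hole
 * bps with blk_fill values 1, 1 and 5, the loop yields
 * fill = 1 + 1 + 5 = 7; for an ordinary data block, fill is simply 1.
 */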

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		ASSERT(parent && parent->db_data_pending);
		ASSERT(db->db_level == parent->db_level-1);
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, dbuf_write_override_done, dr,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}
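
/*
 * The dispatch at the bottom of dbuf_write() can be summarized as
 * follows (sketch of the existing logic, no new behavior):
 *
 *	if (level 0 && DR_OVERRIDDEN)	  // dmu_sync() already wrote it
 *		zio_write() + zio_write_override()
 *	else if (DB_NOFILL)		  // no data: checksum off, NODATA
 *		zio_write(..., NULL, ...)
 *	else				  // common case
 *		arc_write(...)		  // data still lives in the ARC
 *
 * In every case dr->dr_zio becomes a child of either the parent
 * indirect's pending zio or dn_zio, preserving top-down write ordering.
 */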