/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive. A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;
static taskq_t *dbu_evict_taskq;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}
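
/*
 * Usage sketch (illustrative, not a fixed API contract): on success both
 * dbuf_find() and dbuf_find_bonus() return with the dbuf's db_mtx held,
 * so the caller is responsible for dropping it:
 *
 *	dmu_buf_impl_t *db = dbuf_find(os, obj, 0, blkid);
 *	if (db != NULL) {
 *		... inspect db fields under db_mtx ...
 *		mutex_exit(&db->db_mtx);
 *	}
 */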
174 */ 175 static dmu_buf_impl_t * 176 dbuf_hash_insert(dmu_buf_impl_t *db) 177 { 178 dbuf_hash_table_t *h = &dbuf_hash_table; 179 objset_t *os = db->db_objset; 180 uint64_t obj = db->db.db_object; 181 int level = db->db_level; 182 uint64_t blkid = db->db_blkid; 183 uint64_t hv = DBUF_HASH(os, obj, level, blkid); 184 uint64_t idx = hv & h->hash_table_mask; 185 dmu_buf_impl_t *dbf; 186 187 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 188 for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { 189 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { 190 mutex_enter(&dbf->db_mtx); 191 if (dbf->db_state != DB_EVICTING) { 192 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 193 return (dbf); 194 } 195 mutex_exit(&dbf->db_mtx); 196 } 197 } 198 199 mutex_enter(&db->db_mtx); 200 db->db_hash_next = h->hash_table[idx]; 201 h->hash_table[idx] = db; 202 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 203 atomic_inc_64(&dbuf_hash_count); 204 205 return (NULL); 206 } 207 208 /* 209 * Remove an entry from the hash table. It must be in the EVICTING state. 210 */ 211 static void 212 dbuf_hash_remove(dmu_buf_impl_t *db) 213 { 214 dbuf_hash_table_t *h = &dbuf_hash_table; 215 uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object, 216 db->db_level, db->db_blkid); 217 uint64_t idx = hv & h->hash_table_mask; 218 dmu_buf_impl_t *dbf, **dbp; 219 220 /* 221 * We musn't hold db_mtx to maintain lock ordering: 222 * DBUF_HASH_MUTEX > db_mtx. 223 */ 224 ASSERT(refcount_is_zero(&db->db_holds)); 225 ASSERT(db->db_state == DB_EVICTING); 226 ASSERT(!MUTEX_HELD(&db->db_mtx)); 227 228 mutex_enter(DBUF_HASH_MUTEX(h, idx)); 229 dbp = &h->hash_table[idx]; 230 while ((dbf = *dbp) != db) { 231 dbp = &dbf->db_hash_next; 232 ASSERT(dbf != NULL); 233 } 234 *dbp = db->db_hash_next; 235 db->db_hash_next = NULL; 236 mutex_exit(DBUF_HASH_MUTEX(h, idx)); 237 atomic_dec_64(&dbuf_hash_count); 238 } 239 240 static arc_evict_func_t dbuf_do_evict; 241 242 typedef enum { 243 DBVU_EVICTING, 244 DBVU_NOT_EVICTING 245 } dbvu_verify_type_t; 246 247 static void 248 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type) 249 { 250 #ifdef ZFS_DEBUG 251 int64_t holds; 252 253 if (db->db_user == NULL) 254 return; 255 256 /* Only data blocks support the attachment of user data. */ 257 ASSERT(db->db_level == 0); 258 259 /* Clients must resolve a dbuf before attaching user data. */ 260 ASSERT(db->db.db_data != NULL); 261 ASSERT3U(db->db_state, ==, DB_CACHED); 262 263 holds = refcount_count(&db->db_holds); 264 if (verify_type == DBVU_EVICTING) { 265 /* 266 * Immediate eviction occurs when holds == dirtycnt. 267 * For normal eviction buffers, holds is zero on 268 * eviction, except when dbuf_fix_old_data() calls 269 * dbuf_clear_data(). However, the hold count can grow 270 * during eviction even though db_mtx is held (see 271 * dmu_bonus_hold() for an example), so we can only 272 * test the generic invariant that holds >= dirtycnt. 
273 */ 274 ASSERT3U(holds, >=, db->db_dirtycnt); 275 } else { 276 if (db->db_user_immediate_evict == TRUE) 277 ASSERT3U(holds, >=, db->db_dirtycnt); 278 else 279 ASSERT3U(holds, >, 0); 280 } 281 #endif 282 } 283 284 static void 285 dbuf_evict_user(dmu_buf_impl_t *db) 286 { 287 dmu_buf_user_t *dbu = db->db_user; 288 289 ASSERT(MUTEX_HELD(&db->db_mtx)); 290 291 if (dbu == NULL) 292 return; 293 294 dbuf_verify_user(db, DBVU_EVICTING); 295 db->db_user = NULL; 296 297 #ifdef ZFS_DEBUG 298 if (dbu->dbu_clear_on_evict_dbufp != NULL) 299 *dbu->dbu_clear_on_evict_dbufp = NULL; 300 #endif 301 302 /* 303 * Invoke the callback from a taskq to avoid lock order reversals 304 * and limit stack depth. 305 */ 306 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0, 307 &dbu->dbu_tqent); 308 } 309 310 boolean_t 311 dbuf_is_metadata(dmu_buf_impl_t *db) 312 { 313 if (db->db_level > 0) { 314 return (B_TRUE); 315 } else { 316 boolean_t is_metadata; 317 318 DB_DNODE_ENTER(db); 319 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type); 320 DB_DNODE_EXIT(db); 321 322 return (is_metadata); 323 } 324 } 325 326 void 327 dbuf_evict(dmu_buf_impl_t *db) 328 { 329 ASSERT(MUTEX_HELD(&db->db_mtx)); 330 ASSERT(db->db_buf == NULL); 331 ASSERT(db->db_data_pending == NULL); 332 333 dbuf_clear(db); 334 dbuf_destroy(db); 335 } 336 337 void 338 dbuf_init(void) 339 { 340 uint64_t hsize = 1ULL << 16; 341 dbuf_hash_table_t *h = &dbuf_hash_table; 342 int i; 343 344 /* 345 * The hash table is big enough to fill all of physical memory 346 * with an average 4K block size. The table will take up 347 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 348 */ 349 while (hsize * 4096 < physmem * PAGESIZE) 350 hsize <<= 1; 351 352 retry: 353 h->hash_table_mask = hsize - 1; 354 h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 355 if (h->hash_table == NULL) { 356 /* XXX - we should really return an error instead of assert */ 357 ASSERT(hsize > (1ULL << 10)); 358 hsize >>= 1; 359 goto retry; 360 } 361 362 dbuf_cache = kmem_cache_create("dmu_buf_impl_t", 363 sizeof (dmu_buf_impl_t), 364 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 365 366 for (i = 0; i < DBUF_MUTEXES; i++) 367 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 368 369 /* 370 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc 371 * configuration is not required. 372 */ 373 dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0); 374 } 375 376 void 377 dbuf_fini(void) 378 { 379 dbuf_hash_table_t *h = &dbuf_hash_table; 380 int i; 381 382 for (i = 0; i < DBUF_MUTEXES; i++) 383 mutex_destroy(&h->hash_mutexes[i]); 384 kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 385 kmem_cache_destroy(dbuf_cache); 386 taskq_destroy(dbu_evict_taskq); 387 } 388 389 /* 390 * Other stuff. 
391 */ 392 393 #ifdef ZFS_DEBUG 394 static void 395 dbuf_verify(dmu_buf_impl_t *db) 396 { 397 dnode_t *dn; 398 dbuf_dirty_record_t *dr; 399 400 ASSERT(MUTEX_HELD(&db->db_mtx)); 401 402 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY)) 403 return; 404 405 ASSERT(db->db_objset != NULL); 406 DB_DNODE_ENTER(db); 407 dn = DB_DNODE(db); 408 if (dn == NULL) { 409 ASSERT(db->db_parent == NULL); 410 ASSERT(db->db_blkptr == NULL); 411 } else { 412 ASSERT3U(db->db.db_object, ==, dn->dn_object); 413 ASSERT3P(db->db_objset, ==, dn->dn_objset); 414 ASSERT3U(db->db_level, <, dn->dn_nlevels); 415 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 416 db->db_blkid == DMU_SPILL_BLKID || 417 !avl_is_empty(&dn->dn_dbufs)); 418 } 419 if (db->db_blkid == DMU_BONUS_BLKID) { 420 ASSERT(dn != NULL); 421 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 422 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID); 423 } else if (db->db_blkid == DMU_SPILL_BLKID) { 424 ASSERT(dn != NULL); 425 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 426 ASSERT0(db->db.db_offset); 427 } else { 428 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size); 429 } 430 431 for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next) 432 ASSERT(dr->dr_dbuf == db); 433 434 for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next) 435 ASSERT(dr->dr_dbuf == db); 436 437 /* 438 * We can't assert that db_size matches dn_datablksz because it 439 * can be momentarily different when another thread is doing 440 * dnode_set_blksz(). 441 */ 442 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) { 443 dr = db->db_data_pending; 444 /* 445 * It should only be modified in syncing context, so 446 * make sure we only have one copy of the data. 447 */ 448 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf); 449 } 450 451 /* verify db->db_blkptr */ 452 if (db->db_blkptr) { 453 if (db->db_parent == dn->dn_dbuf) { 454 /* db is pointed to by the dnode */ 455 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */ 456 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object)) 457 ASSERT(db->db_parent == NULL); 458 else 459 ASSERT(db->db_parent != NULL); 460 if (db->db_blkid != DMU_SPILL_BLKID) 461 ASSERT3P(db->db_blkptr, ==, 462 &dn->dn_phys->dn_blkptr[db->db_blkid]); 463 } else { 464 /* db is pointed to by an indirect block */ 465 int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT; 466 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1); 467 ASSERT3U(db->db_parent->db.db_object, ==, 468 db->db.db_object); 469 /* 470 * dnode_grow_indblksz() can make this fail if we don't 471 * have the struct_rwlock. XXX indblksz no longer 472 * grows. safe to do this now? 473 */ 474 if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 475 ASSERT3P(db->db_blkptr, ==, 476 ((blkptr_t *)db->db_parent->db.db_data + 477 db->db_blkid % epb)); 478 } 479 } 480 } 481 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) && 482 (db->db_buf == NULL || db->db_buf->b_data) && 483 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID && 484 db->db_state != DB_FILL && !dn->dn_free_txg) { 485 /* 486 * If the blkptr isn't set but they have nonzero data, 487 * it had better be dirty, otherwise we'll lose that 488 * data when we evict this buffer. 489 * 490 * There is an exception to this rule for indirect blocks; in 491 * this case, if the indirect block is a hole, we fill in a few 492 * fields on each of the child blocks (importantly, birth time) 493 * to prevent hole birth times from being lost when you 494 * partially fill in a hole. 
495 */ 496 if (db->db_dirtycnt == 0) { 497 if (db->db_level == 0) { 498 uint64_t *buf = db->db.db_data; 499 int i; 500 501 for (i = 0; i < db->db.db_size >> 3; i++) { 502 ASSERT(buf[i] == 0); 503 } 504 } else { 505 blkptr_t *bps = db->db.db_data; 506 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==, 507 db->db.db_size); 508 /* 509 * We want to verify that all the blkptrs in the 510 * indirect block are holes, but we may have 511 * automatically set up a few fields for them. 512 * We iterate through each blkptr and verify 513 * they only have those fields set. 514 */ 515 for (int i = 0; 516 i < db->db.db_size / sizeof (blkptr_t); 517 i++) { 518 blkptr_t *bp = &bps[i]; 519 ASSERT(ZIO_CHECKSUM_IS_ZERO( 520 &bp->blk_cksum)); 521 ASSERT( 522 DVA_IS_EMPTY(&bp->blk_dva[0]) && 523 DVA_IS_EMPTY(&bp->blk_dva[1]) && 524 DVA_IS_EMPTY(&bp->blk_dva[2])); 525 ASSERT0(bp->blk_fill); 526 ASSERT0(bp->blk_pad[0]); 527 ASSERT0(bp->blk_pad[1]); 528 ASSERT(!BP_IS_EMBEDDED(bp)); 529 ASSERT(BP_IS_HOLE(bp)); 530 ASSERT0(bp->blk_phys_birth); 531 } 532 } 533 } 534 } 535 DB_DNODE_EXIT(db); 536 } 537 #endif 538 539 static void 540 dbuf_clear_data(dmu_buf_impl_t *db) 541 { 542 ASSERT(MUTEX_HELD(&db->db_mtx)); 543 dbuf_evict_user(db); 544 db->db_buf = NULL; 545 db->db.db_data = NULL; 546 if (db->db_state != DB_NOFILL) 547 db->db_state = DB_UNCACHED; 548 } 549 550 static void 551 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) 552 { 553 ASSERT(MUTEX_HELD(&db->db_mtx)); 554 ASSERT(buf != NULL); 555 556 db->db_buf = buf; 557 ASSERT(buf->b_data != NULL); 558 db->db.db_data = buf->b_data; 559 if (!arc_released(buf)) 560 arc_set_callback(buf, dbuf_do_evict, db); 561 } 562 563 /* 564 * Loan out an arc_buf for read. Return the loaned arc_buf. 565 */ 566 arc_buf_t * 567 dbuf_loan_arcbuf(dmu_buf_impl_t *db) 568 { 569 arc_buf_t *abuf; 570 571 mutex_enter(&db->db_mtx); 572 if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) { 573 int blksz = db->db.db_size; 574 spa_t *spa = db->db_objset->os_spa; 575 576 mutex_exit(&db->db_mtx); 577 abuf = arc_loan_buf(spa, blksz); 578 bcopy(db->db.db_data, abuf->b_data, blksz); 579 } else { 580 abuf = db->db_buf; 581 arc_loan_inuse_buf(abuf, db); 582 dbuf_clear_data(db); 583 mutex_exit(&db->db_mtx); 584 } 585 return (abuf); 586 } 587 588 /* 589 * Calculate which level n block references the data at the level 0 offset 590 * provided. 591 */ 592 uint64_t 593 dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset) 594 { 595 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) { 596 /* 597 * The level n blkid is equal to the level 0 blkid divided by 598 * the number of level 0s in a level n block. 599 * 600 * The level 0 blkid is offset >> datablkshift = 601 * offset / 2^datablkshift. 602 * 603 * The number of level 0s in a level n is the number of block 604 * pointers in an indirect block, raised to the power of level. 605 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level = 606 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)). 
607 * 608 * Thus, the level n blkid is: offset / 609 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT))) 610 * = offset / 2^(datablkshift + level * 611 * (indblkshift - SPA_BLKPTRSHIFT)) 612 * = offset >> (datablkshift + level * 613 * (indblkshift - SPA_BLKPTRSHIFT)) 614 */ 615 return (offset >> (dn->dn_datablkshift + level * 616 (dn->dn_indblkshift - SPA_BLKPTRSHIFT))); 617 } else { 618 ASSERT3U(offset, <, dn->dn_datablksz); 619 return (0); 620 } 621 } 622 623 static void 624 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb) 625 { 626 dmu_buf_impl_t *db = vdb; 627 628 mutex_enter(&db->db_mtx); 629 ASSERT3U(db->db_state, ==, DB_READ); 630 /* 631 * All reads are synchronous, so we must have a hold on the dbuf 632 */ 633 ASSERT(refcount_count(&db->db_holds) > 0); 634 ASSERT(db->db_buf == NULL); 635 ASSERT(db->db.db_data == NULL); 636 if (db->db_level == 0 && db->db_freed_in_flight) { 637 /* we were freed in flight; disregard any error */ 638 arc_release(buf, db); 639 bzero(buf->b_data, db->db.db_size); 640 arc_buf_freeze(buf); 641 db->db_freed_in_flight = FALSE; 642 dbuf_set_data(db, buf); 643 db->db_state = DB_CACHED; 644 } else if (zio == NULL || zio->io_error == 0) { 645 dbuf_set_data(db, buf); 646 db->db_state = DB_CACHED; 647 } else { 648 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 649 ASSERT3P(db->db_buf, ==, NULL); 650 VERIFY(arc_buf_remove_ref(buf, db)); 651 db->db_state = DB_UNCACHED; 652 } 653 cv_broadcast(&db->db_changed); 654 dbuf_rele_and_unlock(db, NULL); 655 } 656 657 static void 658 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 659 { 660 dnode_t *dn; 661 zbookmark_phys_t zb; 662 arc_flags_t aflags = ARC_FLAG_NOWAIT; 663 664 DB_DNODE_ENTER(db); 665 dn = DB_DNODE(db); 666 ASSERT(!refcount_is_zero(&db->db_holds)); 667 /* We need the struct_rwlock to prevent db_blkptr from changing. */ 668 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 669 ASSERT(MUTEX_HELD(&db->db_mtx)); 670 ASSERT(db->db_state == DB_UNCACHED); 671 ASSERT(db->db_buf == NULL); 672 673 if (db->db_blkid == DMU_BONUS_BLKID) { 674 int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 675 676 ASSERT3U(bonuslen, <=, db->db.db_size); 677 db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); 678 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 679 if (bonuslen < DN_MAX_BONUSLEN) 680 bzero(db->db.db_data, DN_MAX_BONUSLEN); 681 if (bonuslen) 682 bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 683 DB_DNODE_EXIT(db); 684 db->db_state = DB_CACHED; 685 mutex_exit(&db->db_mtx); 686 return; 687 } 688 689 /* 690 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 691 * processes the delete record and clears the bp while we are waiting 692 * for the dn_mtx (resulting in a "no" from block_freed). 693 */ 694 if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 695 (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 696 BP_IS_HOLE(db->db_blkptr)))) { 697 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 698 699 dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa, 700 db->db.db_size, db, type)); 701 bzero(db->db.db_data, db->db.db_size); 702 703 if (db->db_blkptr != NULL && db->db_level > 0 && 704 BP_IS_HOLE(db->db_blkptr) && 705 db->db_blkptr->blk_birth != 0) { 706 blkptr_t *bps = db->db.db_data; 707 for (int i = 0; i < ((1 << 708 DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 709 i++) { 710 blkptr_t *bp = &bps[i]; 711 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 712 1 << dn->dn_indblkshift); 713 BP_SET_LSIZE(bp, 714 BP_GET_LEVEL(db->db_blkptr) == 1 ? 
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_FLAG_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}
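
/*
 * Usage sketch for dbuf_read() (illustrative): most callers in this
 * file issue a standalone synchronous read with no parent zio, e.g.
 *
 *	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
 *
 * as dmu_buf_will_dirty() does below. Passing a non-NULL parent zio
 * instead makes the read asynchronous; the caller must then zio_wait()
 * on that parent zio before touching db->db_data.
 */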

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer; we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_clear_data(db);
	}
}
898 */ 899 ASSERT(dr->dr_txg >= txg - 2); 900 if (db->db_blkid == DMU_BONUS_BLKID) { 901 /* Note that the data bufs here are zio_bufs */ 902 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN); 903 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 904 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN); 905 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 906 int size = db->db.db_size; 907 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 908 spa_t *spa = db->db_objset->os_spa; 909 910 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type); 911 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); 912 } else { 913 dbuf_clear_data(db); 914 } 915 } 916 917 void 918 dbuf_unoverride(dbuf_dirty_record_t *dr) 919 { 920 dmu_buf_impl_t *db = dr->dr_dbuf; 921 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 922 uint64_t txg = dr->dr_txg; 923 924 ASSERT(MUTEX_HELD(&db->db_mtx)); 925 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 926 ASSERT(db->db_level == 0); 927 928 if (db->db_blkid == DMU_BONUS_BLKID || 929 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 930 return; 931 932 ASSERT(db->db_data_pending != dr); 933 934 /* free this block */ 935 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 936 zio_free(db->db_objset->os_spa, txg, bp); 937 938 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 939 dr->dt.dl.dr_nopwrite = B_FALSE; 940 941 /* 942 * Release the already-written buffer, so we leave it in 943 * a consistent dirty state. Note that all callers are 944 * modifying the buffer, so they will immediately do 945 * another (redundant) arc_release(). Therefore, leave 946 * the buf thawed to save the effort of freezing & 947 * immediately re-thawing it. 948 */ 949 arc_release(dr->dt.dl.dr_data, db); 950 } 951 952 /* 953 * Evict (if its unreferenced) or clear (if its referenced) any level-0 954 * data blocks in the free range, so that any future readers will find 955 * empty blocks. 956 * 957 * This is a no-op if the dataset is in the middle of an incremental 958 * receive; see comment below for details. 959 */ 960 void 961 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 962 dmu_tx_t *tx) 963 { 964 dmu_buf_impl_t db_search; 965 dmu_buf_impl_t *db, *db_next; 966 uint64_t txg = tx->tx_txg; 967 avl_index_t where; 968 boolean_t freespill = 969 (start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID); 970 971 if (end_blkid > dn->dn_maxblkid && !freespill) 972 end_blkid = dn->dn_maxblkid; 973 dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 974 975 db_search.db_level = 0; 976 db_search.db_blkid = start_blkid; 977 db_search.db_state = DB_SEARCH; 978 979 mutex_enter(&dn->dn_dbufs_mtx); 980 if (start_blkid >= dn->dn_unlisted_l0_blkid && !freespill) { 981 /* There can't be any dbufs in this range; no need to search. */ 982 #ifdef DEBUG 983 db = avl_find(&dn->dn_dbufs, &db_search, &where); 984 ASSERT3P(db, ==, NULL); 985 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 986 ASSERT(db == NULL || db->db_level > 0); 987 #endif 988 mutex_exit(&dn->dn_dbufs_mtx); 989 return; 990 } else if (dmu_objset_is_receiving(dn->dn_objset)) { 991 /* 992 * If we are receiving, we expect there to be no dbufs in 993 * the range to be freed, because receive modifies each 994 * block at most once, and in offset order. If this is 995 * not the case, it can lead to performance problems, 996 * so note that we unexpectedly took the slow path. 
997 */ 998 atomic_inc_64(&zfs_free_range_recv_miss); 999 } 1000 1001 db = avl_find(&dn->dn_dbufs, &db_search, &where); 1002 ASSERT3P(db, ==, NULL); 1003 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 1004 1005 for (; db != NULL; db = db_next) { 1006 db_next = AVL_NEXT(&dn->dn_dbufs, db); 1007 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1008 1009 if (db->db_level != 0 || db->db_blkid > end_blkid) { 1010 break; 1011 } 1012 ASSERT3U(db->db_blkid, >=, start_blkid); 1013 1014 /* found a level 0 buffer in the range */ 1015 mutex_enter(&db->db_mtx); 1016 if (dbuf_undirty(db, tx)) { 1017 /* mutex has been dropped and dbuf destroyed */ 1018 continue; 1019 } 1020 1021 if (db->db_state == DB_UNCACHED || 1022 db->db_state == DB_NOFILL || 1023 db->db_state == DB_EVICTING) { 1024 ASSERT(db->db.db_data == NULL); 1025 mutex_exit(&db->db_mtx); 1026 continue; 1027 } 1028 if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1029 /* will be handled in dbuf_read_done or dbuf_rele */ 1030 db->db_freed_in_flight = TRUE; 1031 mutex_exit(&db->db_mtx); 1032 continue; 1033 } 1034 if (refcount_count(&db->db_holds) == 0) { 1035 ASSERT(db->db_buf); 1036 dbuf_clear(db); 1037 continue; 1038 } 1039 /* The dbuf is referenced */ 1040 1041 if (db->db_last_dirty != NULL) { 1042 dbuf_dirty_record_t *dr = db->db_last_dirty; 1043 1044 if (dr->dr_txg == txg) { 1045 /* 1046 * This buffer is "in-use", re-adjust the file 1047 * size to reflect that this buffer may 1048 * contain new data when we sync. 1049 */ 1050 if (db->db_blkid != DMU_SPILL_BLKID && 1051 db->db_blkid > dn->dn_maxblkid) 1052 dn->dn_maxblkid = db->db_blkid; 1053 dbuf_unoverride(dr); 1054 } else { 1055 /* 1056 * This dbuf is not dirty in the open context. 1057 * Either uncache it (if its not referenced in 1058 * the open context) or reset its contents to 1059 * empty. 1060 */ 1061 dbuf_fix_old_data(db, txg); 1062 } 1063 } 1064 /* clear the contents if its cached */ 1065 if (db->db_state == DB_CACHED) { 1066 ASSERT(db->db.db_data != NULL); 1067 arc_release(db->db_buf, db); 1068 bzero(db->db.db_data, db->db.db_size); 1069 arc_buf_freeze(db->db_buf); 1070 } 1071 1072 mutex_exit(&db->db_mtx); 1073 } 1074 mutex_exit(&dn->dn_dbufs_mtx); 1075 } 1076 1077 static int 1078 dbuf_block_freeable(dmu_buf_impl_t *db) 1079 { 1080 dsl_dataset_t *ds = db->db_objset->os_dsl_dataset; 1081 uint64_t birth_txg = 0; 1082 1083 /* 1084 * We don't need any locking to protect db_blkptr: 1085 * If it's syncing, then db_last_dirty will be set 1086 * so we'll ignore db_blkptr. 1087 * 1088 * This logic ensures that only block births for 1089 * filled blocks are considered. 1090 */ 1091 ASSERT(MUTEX_HELD(&db->db_mtx)); 1092 if (db->db_last_dirty && (db->db_blkptr == NULL || 1093 !BP_IS_HOLE(db->db_blkptr))) { 1094 birth_txg = db->db_last_dirty->dr_txg; 1095 } else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1096 birth_txg = db->db_blkptr->blk_birth; 1097 } 1098 1099 /* 1100 * If this block don't exist or is in a snapshot, it can't be freed. 1101 * Don't pass the bp to dsl_dataset_block_freeable() since we 1102 * are holding the db_mtx lock and might deadlock if we are 1103 * prefetching a dedup-ed block. 
1104 */ 1105 if (birth_txg != 0) 1106 return (ds == NULL || 1107 dsl_dataset_block_freeable(ds, NULL, birth_txg)); 1108 else 1109 return (B_FALSE); 1110 } 1111 1112 void 1113 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1114 { 1115 arc_buf_t *buf, *obuf; 1116 int osize = db->db.db_size; 1117 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1118 dnode_t *dn; 1119 1120 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1121 1122 DB_DNODE_ENTER(db); 1123 dn = DB_DNODE(db); 1124 1125 /* XXX does *this* func really need the lock? */ 1126 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1127 1128 /* 1129 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held 1130 * is OK, because there can be no other references to the db 1131 * when we are changing its size, so no concurrent DB_FILL can 1132 * be happening. 1133 */ 1134 /* 1135 * XXX we should be doing a dbuf_read, checking the return 1136 * value and returning that up to our callers 1137 */ 1138 dmu_buf_will_dirty(&db->db, tx); 1139 1140 /* create the data buffer for the new block */ 1141 buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type); 1142 1143 /* copy old block data to the new block */ 1144 obuf = db->db_buf; 1145 bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1146 /* zero the remainder */ 1147 if (size > osize) 1148 bzero((uint8_t *)buf->b_data + osize, size - osize); 1149 1150 mutex_enter(&db->db_mtx); 1151 dbuf_set_data(db, buf); 1152 VERIFY(arc_buf_remove_ref(obuf, db)); 1153 db->db.db_size = size; 1154 1155 if (db->db_level == 0) { 1156 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1157 db->db_last_dirty->dt.dl.dr_data = buf; 1158 } 1159 mutex_exit(&db->db_mtx); 1160 1161 dnode_willuse_space(dn, size-osize, tx); 1162 DB_DNODE_EXIT(db); 1163 } 1164 1165 void 1166 dbuf_release_bp(dmu_buf_impl_t *db) 1167 { 1168 objset_t *os = db->db_objset; 1169 1170 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 1171 ASSERT(arc_released(os->os_phys_buf) || 1172 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 1173 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 1174 1175 (void) arc_release(db->db_buf, db); 1176 } 1177 1178 /* 1179 * We already have a dirty record for this TXG, and we are being 1180 * dirtied again. 1181 */ 1182 static void 1183 dbuf_redirty(dbuf_dirty_record_t *dr) 1184 { 1185 dmu_buf_impl_t *db = dr->dr_dbuf; 1186 1187 ASSERT(MUTEX_HELD(&db->db_mtx)); 1188 1189 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 1190 /* 1191 * If this buffer has already been written out, 1192 * we now need to reset its state. 1193 */ 1194 dbuf_unoverride(dr); 1195 if (db->db.db_object != DMU_META_DNODE_OBJECT && 1196 db->db_state != DB_NOFILL) { 1197 /* Already released on initial dirty, so just thaw. */ 1198 ASSERT(arc_released(db->db_buf)); 1199 arc_buf_thaw(db->db_buf); 1200 } 1201 } 1202 } 1203 1204 dbuf_dirty_record_t * 1205 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1206 { 1207 dnode_t *dn; 1208 objset_t *os; 1209 dbuf_dirty_record_t **drp, *dr; 1210 int drop_struct_lock = FALSE; 1211 boolean_t do_free_accounting = B_FALSE; 1212 int txgoff = tx->tx_txg & TXG_MASK; 1213 1214 ASSERT(tx->tx_txg != 0); 1215 ASSERT(!refcount_is_zero(&db->db_holds)); 1216 DMU_TX_DIRTY_BUF(tx, db); 1217 1218 DB_DNODE_ENTER(db); 1219 dn = DB_DNODE(db); 1220 /* 1221 * Shouldn't dirty a regular buffer in syncing context. Private 1222 * objects may be dirtied in syncing context, but only if they 1223 * were already pre-dirtied in open context. 
1224 */ 1225 ASSERT(!dmu_tx_is_syncing(tx) || 1226 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 1227 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1228 dn->dn_objset->os_dsl_dataset == NULL); 1229 /* 1230 * We make this assert for private objects as well, but after we 1231 * check if we're already dirty. They are allowed to re-dirty 1232 * in syncing context. 1233 */ 1234 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1235 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1236 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1237 1238 mutex_enter(&db->db_mtx); 1239 /* 1240 * XXX make this true for indirects too? The problem is that 1241 * transactions created with dmu_tx_create_assigned() from 1242 * syncing context don't bother holding ahead. 1243 */ 1244 ASSERT(db->db_level != 0 || 1245 db->db_state == DB_CACHED || db->db_state == DB_FILL || 1246 db->db_state == DB_NOFILL); 1247 1248 mutex_enter(&dn->dn_mtx); 1249 /* 1250 * Don't set dirtyctx to SYNC if we're just modifying this as we 1251 * initialize the objset. 1252 */ 1253 if (dn->dn_dirtyctx == DN_UNDIRTIED && 1254 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1255 dn->dn_dirtyctx = 1256 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1257 ASSERT(dn->dn_dirtyctx_firstset == NULL); 1258 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1259 } 1260 mutex_exit(&dn->dn_mtx); 1261 1262 if (db->db_blkid == DMU_SPILL_BLKID) 1263 dn->dn_have_spill = B_TRUE; 1264 1265 /* 1266 * If this buffer is already dirty, we're done. 1267 */ 1268 drp = &db->db_last_dirty; 1269 ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1270 db->db.db_object == DMU_META_DNODE_OBJECT); 1271 while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 1272 drp = &dr->dr_next; 1273 if (dr && dr->dr_txg == tx->tx_txg) { 1274 DB_DNODE_EXIT(db); 1275 1276 dbuf_redirty(dr); 1277 mutex_exit(&db->db_mtx); 1278 return (dr); 1279 } 1280 1281 /* 1282 * Only valid if not already dirty. 1283 */ 1284 ASSERT(dn->dn_object == 0 || 1285 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1286 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1287 1288 ASSERT3U(dn->dn_nlevels, >, db->db_level); 1289 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1290 dn->dn_phys->dn_nlevels > db->db_level || 1291 dn->dn_next_nlevels[txgoff] > db->db_level || 1292 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1293 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1294 1295 /* 1296 * We should only be dirtying in syncing context if it's the 1297 * mos or we're initializing the os or it's a special object. 1298 * However, we are allowed to dirty in syncing context provided 1299 * we already dirtied it in open context. Hence we must make 1300 * this assertion only if we're not already dirty. 1301 */ 1302 os = dn->dn_objset; 1303 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1304 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1305 ASSERT(db->db.db_size != 0); 1306 1307 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1308 1309 if (db->db_blkid != DMU_BONUS_BLKID) { 1310 /* 1311 * Update the accounting. 1312 * Note: we delay "free accounting" until after we drop 1313 * the db_mtx. This keeps us from grabbing other locks 1314 * (and possibly deadlocking) in bp_get_dsize() while 1315 * also holding the db_mtx. 
1316 */ 1317 dnode_willuse_space(dn, db->db.db_size, tx); 1318 do_free_accounting = dbuf_block_freeable(db); 1319 } 1320 1321 /* 1322 * If this buffer is dirty in an old transaction group we need 1323 * to make a copy of it so that the changes we make in this 1324 * transaction group won't leak out when we sync the older txg. 1325 */ 1326 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1327 if (db->db_level == 0) { 1328 void *data_old = db->db_buf; 1329 1330 if (db->db_state != DB_NOFILL) { 1331 if (db->db_blkid == DMU_BONUS_BLKID) { 1332 dbuf_fix_old_data(db, tx->tx_txg); 1333 data_old = db->db.db_data; 1334 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1335 /* 1336 * Release the data buffer from the cache so 1337 * that we can modify it without impacting 1338 * possible other users of this cached data 1339 * block. Note that indirect blocks and 1340 * private objects are not released until the 1341 * syncing state (since they are only modified 1342 * then). 1343 */ 1344 arc_release(db->db_buf, db); 1345 dbuf_fix_old_data(db, tx->tx_txg); 1346 data_old = db->db_buf; 1347 } 1348 ASSERT(data_old != NULL); 1349 } 1350 dr->dt.dl.dr_data = data_old; 1351 } else { 1352 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1353 list_create(&dr->dt.di.dr_children, 1354 sizeof (dbuf_dirty_record_t), 1355 offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1356 } 1357 if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 1358 dr->dr_accounted = db->db.db_size; 1359 dr->dr_dbuf = db; 1360 dr->dr_txg = tx->tx_txg; 1361 dr->dr_next = *drp; 1362 *drp = dr; 1363 1364 /* 1365 * We could have been freed_in_flight between the dbuf_noread 1366 * and dbuf_dirty. We win, as though the dbuf_noread() had 1367 * happened after the free. 1368 */ 1369 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1370 db->db_blkid != DMU_SPILL_BLKID) { 1371 mutex_enter(&dn->dn_mtx); 1372 if (dn->dn_free_ranges[txgoff] != NULL) { 1373 range_tree_clear(dn->dn_free_ranges[txgoff], 1374 db->db_blkid, 1); 1375 } 1376 mutex_exit(&dn->dn_mtx); 1377 db->db_freed_in_flight = FALSE; 1378 } 1379 1380 /* 1381 * This buffer is now part of this txg 1382 */ 1383 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1384 db->db_dirtycnt += 1; 1385 ASSERT3U(db->db_dirtycnt, <=, 3); 1386 1387 mutex_exit(&db->db_mtx); 1388 1389 if (db->db_blkid == DMU_BONUS_BLKID || 1390 db->db_blkid == DMU_SPILL_BLKID) { 1391 mutex_enter(&dn->dn_mtx); 1392 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1393 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1394 mutex_exit(&dn->dn_mtx); 1395 dnode_setdirty(dn, tx); 1396 DB_DNODE_EXIT(db); 1397 return (dr); 1398 } else if (do_free_accounting) { 1399 blkptr_t *bp = db->db_blkptr; 1400 int64_t willfree = (bp && !BP_IS_HOLE(bp)) ? 1401 bp_get_dsize(os->os_spa, bp) : db->db.db_size; 1402 /* 1403 * This is only a guess -- if the dbuf is dirty 1404 * in a previous txg, we don't know how much 1405 * space it will use on disk yet. We should 1406 * really have the struct_rwlock to access 1407 * db_blkptr, but since this is just a guess, 1408 * it's OK if we get an odd answer. 
1409 */ 1410 ddt_prefetch(os->os_spa, bp); 1411 dnode_willuse_space(dn, -willfree, tx); 1412 } 1413 1414 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 1415 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1416 drop_struct_lock = TRUE; 1417 } 1418 1419 if (db->db_level == 0) { 1420 dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 1421 ASSERT(dn->dn_maxblkid >= db->db_blkid); 1422 } 1423 1424 if (db->db_level+1 < dn->dn_nlevels) { 1425 dmu_buf_impl_t *parent = db->db_parent; 1426 dbuf_dirty_record_t *di; 1427 int parent_held = FALSE; 1428 1429 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1430 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1431 1432 parent = dbuf_hold_level(dn, db->db_level+1, 1433 db->db_blkid >> epbs, FTAG); 1434 ASSERT(parent != NULL); 1435 parent_held = TRUE; 1436 } 1437 if (drop_struct_lock) 1438 rw_exit(&dn->dn_struct_rwlock); 1439 ASSERT3U(db->db_level+1, ==, parent->db_level); 1440 di = dbuf_dirty(parent, tx); 1441 if (parent_held) 1442 dbuf_rele(parent, FTAG); 1443 1444 mutex_enter(&db->db_mtx); 1445 /* 1446 * Since we've dropped the mutex, it's possible that 1447 * dbuf_undirty() might have changed this out from under us. 1448 */ 1449 if (db->db_last_dirty == dr || 1450 dn->dn_object == DMU_META_DNODE_OBJECT) { 1451 mutex_enter(&di->dt.di.dr_mtx); 1452 ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1453 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1454 list_insert_tail(&di->dt.di.dr_children, dr); 1455 mutex_exit(&di->dt.di.dr_mtx); 1456 dr->dr_parent = di; 1457 } 1458 mutex_exit(&db->db_mtx); 1459 } else { 1460 ASSERT(db->db_level+1 == dn->dn_nlevels); 1461 ASSERT(db->db_blkid < dn->dn_nblkptr); 1462 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1463 mutex_enter(&dn->dn_mtx); 1464 ASSERT(!list_link_active(&dr->dr_dirty_node)); 1465 list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1466 mutex_exit(&dn->dn_mtx); 1467 if (drop_struct_lock) 1468 rw_exit(&dn->dn_struct_rwlock); 1469 } 1470 1471 dnode_setdirty(dn, tx); 1472 DB_DNODE_EXIT(db); 1473 return (dr); 1474 } 1475 1476 /* 1477 * Undirty a buffer in the transaction group referenced by the given 1478 * transaction. Return whether this evicted the dbuf. 1479 */ 1480 static boolean_t 1481 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1482 { 1483 dnode_t *dn; 1484 uint64_t txg = tx->tx_txg; 1485 dbuf_dirty_record_t *dr, **drp; 1486 1487 ASSERT(txg != 0); 1488 1489 /* 1490 * Due to our use of dn_nlevels below, this can only be called 1491 * in open context, unless we are operating on the MOS. 1492 * From syncing context, dn_nlevels may be different from the 1493 * dn_nlevels used when dbuf was dirtied. 1494 */ 1495 ASSERT(db->db_objset == 1496 dmu_objset_pool(db->db_objset)->dp_meta_objset || 1497 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 1498 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1499 ASSERT0(db->db_level); 1500 ASSERT(MUTEX_HELD(&db->db_mtx)); 1501 1502 /* 1503 * If this buffer is not dirty, we're done. 
1504 */ 1505 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1506 if (dr->dr_txg <= txg) 1507 break; 1508 if (dr == NULL || dr->dr_txg < txg) 1509 return (B_FALSE); 1510 ASSERT(dr->dr_txg == txg); 1511 ASSERT(dr->dr_dbuf == db); 1512 1513 DB_DNODE_ENTER(db); 1514 dn = DB_DNODE(db); 1515 1516 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1517 1518 ASSERT(db->db.db_size != 0); 1519 1520 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 1521 dr->dr_accounted, txg); 1522 1523 *drp = dr->dr_next; 1524 1525 /* 1526 * Note that there are three places in dbuf_dirty() 1527 * where this dirty record may be put on a list. 1528 * Make sure to do a list_remove corresponding to 1529 * every one of those list_insert calls. 1530 */ 1531 if (dr->dr_parent) { 1532 mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1533 list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1534 mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 1535 } else if (db->db_blkid == DMU_SPILL_BLKID || 1536 db->db_level + 1 == dn->dn_nlevels) { 1537 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1538 mutex_enter(&dn->dn_mtx); 1539 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1540 mutex_exit(&dn->dn_mtx); 1541 } 1542 DB_DNODE_EXIT(db); 1543 1544 if (db->db_state != DB_NOFILL) { 1545 dbuf_unoverride(dr); 1546 1547 ASSERT(db->db_buf != NULL); 1548 ASSERT(dr->dt.dl.dr_data != NULL); 1549 if (dr->dt.dl.dr_data != db->db_buf) 1550 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db)); 1551 } 1552 1553 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1554 1555 ASSERT(db->db_dirtycnt > 0); 1556 db->db_dirtycnt -= 1; 1557 1558 if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1559 arc_buf_t *buf = db->db_buf; 1560 1561 ASSERT(db->db_state == DB_NOFILL || arc_released(buf)); 1562 dbuf_clear_data(db); 1563 VERIFY(arc_buf_remove_ref(buf, db)); 1564 dbuf_evict(db); 1565 return (B_TRUE); 1566 } 1567 1568 return (B_FALSE); 1569 } 1570 1571 void 1572 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1573 { 1574 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1575 int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1576 1577 ASSERT(tx->tx_txg != 0); 1578 ASSERT(!refcount_is_zero(&db->db_holds)); 1579 1580 /* 1581 * Quick check for dirtyness. For already dirty blocks, this 1582 * reduces runtime of this function by >90%, and overall performance 1583 * by 50% for some workloads (e.g. file deletion with indirect blocks 1584 * cached). 1585 */ 1586 mutex_enter(&db->db_mtx); 1587 dbuf_dirty_record_t *dr; 1588 for (dr = db->db_last_dirty; 1589 dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 1590 /* 1591 * It's possible that it is already dirty but not cached, 1592 * because there are some calls to dbuf_dirty() that don't 1593 * go through dmu_buf_will_dirty(). 1594 */ 1595 if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 1596 /* This dbuf is already dirty and cached. 

void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	/*
	 * Quick check for dirtiness. For already dirty blocks, this
	 * reduces runtime of this function by >90%, and overall performance
	 * by 50% for some workloads (e.g. file deletion with indirect blocks
	 * cached).
	 */
	mutex_enter(&db->db_mtx);
	dbuf_dirty_record_t *dr;
	for (dr = db->db_last_dirty;
	    dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
		/*
		 * It's possible that it is already dirty but not cached,
		 * because there are some calls to dbuf_dirty() that don't
		 * go through dmu_buf_will_dirty().
		 */
		if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
			/* This dbuf is already dirty and cached. */
			dbuf_redirty(dr);
			mutex_exit(&db->db_mtx);
			return;
		}
	}
	mutex_exit(&db->db_mtx);

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
	struct dirty_leaf *dl;
	dmu_object_type_t type;

	if (etype == BP_EMBEDDED_TYPE_DATA) {
		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
		    SPA_FEATURE_EMBEDDED_DATA));
	}

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	ASSERT0(db->db_level);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	dmu_buf_will_not_fill(dbuf, tx);

	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
	dl = &db->db_last_dirty->dt.dl;
	encode_embedded_bp_compressed(&dl->dr_overridden_by,
	    data, comp, uncompressed_size, compressed_size);
	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
	BP_SET_TYPE(&dl->dr_overridden_by, type);
	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);

	dl->dr_override_state = DR_OVERRIDDEN;
	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1701 */ 1702 void 1703 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 1704 { 1705 ASSERT(!refcount_is_zero(&db->db_holds)); 1706 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1707 ASSERT(db->db_level == 0); 1708 ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA); 1709 ASSERT(buf != NULL); 1710 ASSERT(arc_buf_size(buf) == db->db.db_size); 1711 ASSERT(tx->tx_txg != 0); 1712 1713 arc_return_buf(buf, db); 1714 ASSERT(arc_released(buf)); 1715 1716 mutex_enter(&db->db_mtx); 1717 1718 while (db->db_state == DB_READ || db->db_state == DB_FILL) 1719 cv_wait(&db->db_changed, &db->db_mtx); 1720 1721 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 1722 1723 if (db->db_state == DB_CACHED && 1724 refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 1725 mutex_exit(&db->db_mtx); 1726 (void) dbuf_dirty(db, tx); 1727 bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1728 VERIFY(arc_buf_remove_ref(buf, db)); 1729 xuio_stat_wbuf_copied(); 1730 return; 1731 } 1732 1733 xuio_stat_wbuf_nocopy(); 1734 if (db->db_state == DB_CACHED) { 1735 dbuf_dirty_record_t *dr = db->db_last_dirty; 1736 1737 ASSERT(db->db_buf != NULL); 1738 if (dr != NULL && dr->dr_txg == tx->tx_txg) { 1739 ASSERT(dr->dt.dl.dr_data == db->db_buf); 1740 if (!arc_released(db->db_buf)) { 1741 ASSERT(dr->dt.dl.dr_override_state == 1742 DR_OVERRIDDEN); 1743 arc_release(db->db_buf, db); 1744 } 1745 dr->dt.dl.dr_data = buf; 1746 VERIFY(arc_buf_remove_ref(db->db_buf, db)); 1747 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 1748 arc_release(db->db_buf, db); 1749 VERIFY(arc_buf_remove_ref(db->db_buf, db)); 1750 } 1751 db->db_buf = NULL; 1752 } 1753 ASSERT(db->db_buf == NULL); 1754 dbuf_set_data(db, buf); 1755 db->db_state = DB_FILL; 1756 mutex_exit(&db->db_mtx); 1757 (void) dbuf_dirty(db, tx); 1758 dmu_buf_fill_done(&db->db, tx); 1759 } 1760 1761 /* 1762 * "Clear" the contents of this dbuf. This will mark the dbuf 1763 * EVICTING and clear *most* of its references. Unfortunately, 1764 * when we are not holding the dn_dbufs_mtx, we can't clear the 1765 * entry in the dn_dbufs list. We have to wait until dbuf_destroy() 1766 * in this case. For callers from the DMU we will usually see: 1767 * dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy() 1768 * For the arc callback, we will usually see: 1769 * dbuf_do_evict()->dbuf_clear();dbuf_destroy() 1770 * Sometimes, though, we will get a mix of these two: 1771 * DMU: dbuf_clear()->arc_clear_callback() 1772 * ARC: dbuf_do_evict()->dbuf_destroy() 1773 * 1774 * This routine will dissociate the dbuf from the arc, by calling 1775 * arc_clear_callback(), but will not evict the data from the ARC. 
1776 */ 1777 void 1778 dbuf_clear(dmu_buf_impl_t *db) 1779 { 1780 dnode_t *dn; 1781 dmu_buf_impl_t *parent = db->db_parent; 1782 dmu_buf_impl_t *dndb; 1783 boolean_t dbuf_gone = B_FALSE; 1784 1785 ASSERT(MUTEX_HELD(&db->db_mtx)); 1786 ASSERT(refcount_is_zero(&db->db_holds)); 1787 1788 dbuf_evict_user(db); 1789 1790 if (db->db_state == DB_CACHED) { 1791 ASSERT(db->db.db_data != NULL); 1792 if (db->db_blkid == DMU_BONUS_BLKID) { 1793 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 1794 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 1795 } 1796 db->db.db_data = NULL; 1797 db->db_state = DB_UNCACHED; 1798 } 1799 1800 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 1801 ASSERT(db->db_data_pending == NULL); 1802 1803 db->db_state = DB_EVICTING; 1804 db->db_blkptr = NULL; 1805 1806 DB_DNODE_ENTER(db); 1807 dn = DB_DNODE(db); 1808 dndb = dn->dn_dbuf; 1809 if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) { 1810 avl_remove(&dn->dn_dbufs, db); 1811 atomic_dec_32(&dn->dn_dbufs_count); 1812 membar_producer(); 1813 DB_DNODE_EXIT(db); 1814 /* 1815 * Decrementing the dbuf count means that the hold corresponding 1816 * to the removed dbuf is no longer discounted in dnode_move(), 1817 * so the dnode cannot be moved until after we release the hold. 1818 * The membar_producer() ensures visibility of the decremented 1819 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 1820 * release any lock. 1821 */ 1822 dnode_rele(dn, db); 1823 db->db_dnode_handle = NULL; 1824 } else { 1825 DB_DNODE_EXIT(db); 1826 } 1827 1828 if (db->db_buf) 1829 dbuf_gone = arc_clear_callback(db->db_buf); 1830 1831 if (!dbuf_gone) 1832 mutex_exit(&db->db_mtx); 1833 1834 /* 1835 * If this dbuf is referenced from an indirect dbuf, 1836 * decrement the ref count on the indirect dbuf. 1837 */ 1838 if (parent && parent != dndb) 1839 dbuf_rele(parent, db); 1840 } 1841 1842 /* 1843 * Note: While bpp will always be updated if the function returns success, 1844 * parentp will not be updated if the dnode does not have dn_dbuf filled in; 1845 * this happens when the dnode is the meta-dnode, or a userused or groupused 1846 * object. 
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}
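/*
 * Editor's note, a worked example of the index arithmetic above using
 * common defaults (the numbers are illustrative, not from the original
 * source): a 16K indirect block (dn_indblkshift == 14) holds 128-byte
 * block pointers (SPA_BLKPTRSHIFT == 7), so epbs == 14 - 7 == 7 and
 * each indirect block maps 1 << 7 == 128 children.  For level-0
 * blkid 1000:
 *
 *	parent (level 1) blkid = 1000 >> 7 = 7
 *	slot within parent     = 1000 & ((1ULL << 7) - 1) = 104
 *
 * which is exactly how *bpp is computed from (*parentp)->db.db_data.
 */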
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user = NULL;
	db->db_user_immediate_evict = FALSE;
	db->db_freed_in_flight = FALSE;
	db->db_pending_evict = FALSE;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	avl_add(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	atomic_inc_32(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

static int
dbuf_do_evict(void *private)
{
	dmu_buf_impl_t *db = private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			avl_remove(&dn->dn_dbufs, db);
			atomic_dec_32(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}
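/*
 * Editor's note (not part of the original source): the db_state values
 * used above form a small state machine.  A rough summary; some of the
 * transitions occur in code outside this part of the file:
 *
 *	(new)       -> DB_EVICTING -> DB_UNCACHED   dbuf_create()
 *	DB_UNCACHED -> DB_READ                      a read has been issued
 *	DB_UNCACHED -> DB_FILL                      caller will rewrite block
 *	DB_READ / DB_FILL -> DB_CACHED              data is valid in db_buf
 *	DB_CACHED   -> DB_UNCACHED -> DB_EVICTING   dbuf_clear() teardown
 *
 * DB_NOFILL is a parallel state for dbufs whose contents the caller has
 * promised to write without ever reading or caching them.
 */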
typedef struct dbuf_prefetch_arg {
	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
	zbookmark_phys_t dpa_zb;	/* The target block to prefetch. */
	int dpa_epbs;	/* Entries (blkptr_t's) Per Block Shift. */
	int dpa_curlevel;	/* The current level that we're reading */
	zio_priority_t dpa_prio;	/* The priority I/Os should be issued at. */
	zio_t *dpa_zio;	/* The parent zio_t for all prefetches. */
	arc_flags_t dpa_aflags;	/* Flags to pass to the final prefetch. */
} dbuf_prefetch_arg_t;

/*
 * Actually issue the prefetch read for the block given.
 */
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return;

	arc_flags_t aflags =
	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
	ASSERT(dpa->dpa_zio != NULL);
	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
	    dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
	    &aflags, &dpa->dpa_zb);
}

/*
 * Called when an indirect block above our prefetch target is read in.  This
 * will either read in the next indirect block down the tree or issue the
 * actual prefetch if the next block down is our target.
 */
static void
dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
{
	dbuf_prefetch_arg_t *dpa = private;

	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
	ASSERT3S(dpa->dpa_curlevel, >, 0);
	if (zio != NULL) {
		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
		ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
	}

	dpa->dpa_curlevel--;

	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
	if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) {
		kmem_free(dpa, sizeof (*dpa));
	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
		dbuf_issue_final_prefetch(dpa, bp);
		kmem_free(dpa, sizeof (*dpa));
	} else {
		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
		zbookmark_phys_t zb;

		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));

		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);

		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
		    bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
		    &iter_aflags, &zb);
	}
	(void) arc_buf_remove_ref(abuf, private);
}
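/*
 * Editor's note, a worked example of the nextblkid computation above
 * (illustrative numbers, not from the original source): suppose we are
 * prefetching level-0 blkid 300000 (zb_level == 0) with epbs == 7, and
 * the level-2 indirect has just been read, so dpa_curlevel is now 1:
 *
 *	nextblkid = 300000 >> (7 * (1 - 0)) = 2343
 *	slot      = P2PHASE(2343, 1ULL << 7) = 39
 *
 * i.e., the level-1 indirect covering blkid 300000 is child 39 of the
 * level-2 block (2343 == 18 * 128 + 39), and its bp is read from
 * abuf->b_data at that offset.
 */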
/*
 * Issue prefetch reads for the given block on the given level.  If the indirect
 * blocks above that block are not in memory, we will read them in
 * asynchronously.  As a result, this call never blocks waiting for a read to
 * complete.
 */
void
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
    arc_flags_t aflags)
{
	blkptr_t bp;
	int epbs, nlevels, curlevel;
	uint64_t curblkid;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	if (blkid > dn->dn_maxblkid)
		return;

	if (dnode_block_freed(dn, blkid))
		return;

	/*
	 * This dnode hasn't been written to disk yet, so there's nothing to
	 * prefetch.
	 */
	nlevels = dn->dn_phys->dn_nlevels;
	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
		return;

	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
		return;

	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
	    level, blkid);
	if (db != NULL) {
		mutex_exit(&db->db_mtx);
		/*
		 * This dbuf already exists.  It is either CACHED, or
		 * (we assume) about to be read or filled.
		 */
		return;
	}

	/*
	 * Find the closest ancestor (indirect block) of the target block
	 * that is present in the cache.  In this indirect block, we will
	 * find the bp that is at curlevel, curblkid.
	 */
	curlevel = level;
	curblkid = blkid;
	while (curlevel < nlevels - 1) {
		int parent_level = curlevel + 1;
		uint64_t parent_blkid = curblkid >> epbs;
		dmu_buf_impl_t *db;

		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
		    FALSE, TRUE, FTAG, &db) == 0) {
			blkptr_t *bpp = db->db_buf->b_data;
			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
			dbuf_rele(db, FTAG);
			break;
		}

		curlevel = parent_level;
		curblkid = parent_blkid;
	}

	if (curlevel == nlevels - 1) {
		/* No cached indirect blocks found. */
		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
		bp = dn->dn_phys->dn_blkptr[curblkid];
	}
	if (BP_IS_HOLE(&bp))
		return;

	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));

	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
	    ZIO_FLAG_CANFAIL);

	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
	    dn->dn_object, level, blkid);
	dpa->dpa_curlevel = curlevel;
	dpa->dpa_prio = prio;
	dpa->dpa_aflags = aflags;
	dpa->dpa_spa = dn->dn_objset->os_spa;
	dpa->dpa_epbs = epbs;
	dpa->dpa_zio = pio;

	/*
	 * If we have the indirect just above us, no need to do the asynchronous
	 * prefetch chain; we'll just run the last step ourselves.  If we're at
	 * a higher level, though, we want to issue the prefetches for all the
	 * indirect blocks asynchronously, so we can go on with whatever we were
	 * doing.
	 */
	if (curlevel == level) {
		ASSERT3U(curblkid, ==, blkid);
		dbuf_issue_final_prefetch(dpa, &bp);
		kmem_free(dpa, sizeof (*dpa));
	} else {
		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
		zbookmark_phys_t zb;

		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
		    dn->dn_object, curlevel, curblkid);
		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
		    &bp, dbuf_prefetch_indirect_done, dpa, prio,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
		    &iter_aflags, &zb);
	}
	/*
	 * We use pio here instead of dpa_zio since it's possible that
	 * dpa may have already been freed.
	 */
	zio_nowait(pio);
}
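/*
 * Illustrative sketch (editor's addition): how a caller such as the
 * zfetch code might use this interface to stage upcoming blocks while
 * scanning an object sequentially.  The loop bounds, priority, and zero
 * aflags are assumptions for the example:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t b = blkid + 1; b <= blkid + 8; b++)
 *		dbuf_prefetch(dn, 0, b, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * Each call returns immediately; any missing indirects are read via the
 * dbuf_prefetch_indirect_done() chain above, rooted at a CANFAIL zio.
 */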
/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
    boolean_t fail_sparse, boolean_t fail_uncached,
    void *tag, dmu_buf_impl_t **dbp)
{
	dmu_buf_impl_t *db, *parent = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT3U(dn->dn_nlevels, >, level);

	*dbp = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);

	if (db == NULL) {
		blkptr_t *bp = NULL;
		int err;

		if (fail_uncached)
			return (SET_ERROR(ENOENT));

		ASSERT3P(parent, ==, NULL);
		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
		if (fail_sparse) {
			if (err == 0 && bp && BP_IS_HOLE(bp))
				err = SET_ERROR(ENOENT);
			if (err) {
				if (parent)
					dbuf_rele(parent, NULL);
				return (err);
			}
		}
		if (err && err != ENOENT)
			return (err);
		db = dbuf_create(dn, level, blkid, parent, bp);
	}

	if (fail_uncached && db->db_state != DB_CACHED) {
		mutex_exit(&db->db_mtx);
		return (SET_ERROR(ENOENT));
	}

	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
		arc_buf_add_ref(db->db_buf, db);
		if (db->db_buf->b_data == NULL) {
			dbuf_clear(db);
			if (parent) {
				dbuf_rele(parent, NULL);
				parent = NULL;
			}
			goto top;
		}
		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
	}

	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    db->db_state == DB_CACHED && db->db_data_pending) {
		dbuf_dirty_record_t *dr = db->db_data_pending;

		if (dr->dt.dl.dr_data == db->db_buf) {
			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

			dbuf_set_data(db,
			    arc_buf_alloc(dn->dn_objset->os_spa,
			    db->db.db_size, db, type));
			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
			    db->db.db_size);
		}
	}

	(void) refcount_add(&db->db_holds, tag);
	DBUF_VERIFY(db);
	mutex_exit(&db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (parent)
		dbuf_rele(parent, NULL);

	ASSERT3P(DB_DNODE(db), ==, dn);
	ASSERT3U(db->db_blkid, ==, blkid);
	ASSERT3U(db->db_level, ==, level);
	*dbp = db;

	return (0);
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	return (dbuf_hold_level(dn, 0, blkid, tag));
}

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
	return (err ? NULL : db);
}
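/*
 * Illustrative sketch (editor's addition): the canonical hold/read/rele
 * pattern for these interfaces.  Error handling is abbreviated and "dn"
 * is assumed to be a held dnode:
 *
 *	dmu_buf_impl_t *db;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		if (dbuf_read(db, NULL, DB_RF_CANFAIL) == 0) {
 *			// db->db.db_data is now valid
 *		}
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * The hold is taken under dn_struct_rwlock, per the comment above,
 * while the read and the release happen after it is dropped.
 */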
void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (SET_ERROR(ENOTSUP));
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}
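/*
 * Editor's note, a worked example of the size normalization above (the
 * numbers are illustrative): SPA_MINBLOCKSIZE is 512, so a requested
 * spill size of 3000 bytes becomes
 *
 *	P2ROUNDUP(3000, 512) == 3072
 *
 * and a request of 0 is first bumped to SPA_MINBLOCKSIZE.  The result
 * handed to dbuf_new_size() is therefore always a multiple of the
 * minimum block size.
 */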
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds = refcount_add(&db->db_holds, tag);
	ASSERT(holds > 1);
}

#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
    void *tag)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dmu_buf_impl_t *found_db;
	boolean_t result = B_FALSE;

	if (db->db_blkid == DMU_BONUS_BLKID)
		found_db = dbuf_find_bonus(os, obj);
	else
		found_db = dbuf_find(os, obj, 0, blkid);

	if (found_db != NULL) {
		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
			(void) refcount_add(&db->db_holds, tag);
			result = B_TRUE;
		}
		mutex_exit(&db->db_mtx);
	}
	return (result);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode. (An indirect
 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
 * dnode's parent dbuf evicting its dnode handles.
 */
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}

void
dmu_buf_rele(dmu_buf_t *db, void *tag)
{
	dbuf_rele((dmu_buf_impl_t *)db, tag);
}

/*
 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_user_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			dnode_t *dn;
			boolean_t evict_dbuf = db->db_pending_evict;

			/*
			 * If the dnode moves here, we cannot cross this
			 * barrier until the move completes.
			 */
			DB_DNODE_ENTER(db);

			dn = DB_DNODE(db);
			atomic_dec_32(&dn->dn_dbufs_count);

			/*
			 * Decrementing the dbuf count means that the bonus
			 * buffer's dnode hold is no longer discounted in
			 * dnode_move().  The dnode cannot move until after
			 * the dnode_rele() below.
			 */
			DB_DNODE_EXIT(db);

			/*
			 * Do not reference db after its lock is dropped.
			 * Another thread may evict it.
			 */
			mutex_exit(&db->db_mtx);

			if (evict_dbuf)
				dnode_evict_bonus(dn);

			dnode_rele(dn, db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_clear_data(db);
			VERIFY(arc_buf_remove_ref(buf, db));
			dbuf_evict(db);
		} else {
			VERIFY(!arc_buf_remove_ref(db->db_buf, db));

			/*
			 * A dbuf will be eligible for eviction if either the
			 * 'primarycache' property is set or a duplicate
			 * copy of this buffer is already cached in the ARC.
			 *
			 * In the case of the 'primarycache' property, a buffer
			 * is considered for eviction if it matches the
			 * criteria set in the property.
			 *
			 * To decide if our buffer is considered a
			 * duplicate, we must call into the ARC to determine
			 * if multiple buffers are referencing the same
			 * block on-disk.  If so, then we simply evict
			 * ourselves.
			 */
			if (!DBUF_IS_CACHEABLE(db)) {
				if (db->db_blkptr != NULL &&
				    !BP_IS_HOLE(db->db_blkptr) &&
				    !BP_IS_EMBEDDED(db->db_blkptr)) {
					spa_t *spa =
					    dmu_objset_spa(db->db_objset);
					blkptr_t bp = *db->db_blkptr;
					dbuf_clear(db);
					arc_freed(spa, &bp);
				} else {
					dbuf_clear(db);
				}
			} else if (db->db_pending_evict ||
			    arc_buf_eviction_needed(db->db_buf)) {
				dbuf_clear(db);
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
    dmu_buf_user_t *new_user)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	mutex_enter(&db->db_mtx);
	dbuf_verify_user(db, DBVU_NOT_EVICTING);
	if (db->db_user == old_user)
		db->db_user = new_user;
	else
		old_user = db->db_user;
	dbuf_verify_user(db, DBVU_NOT_EVICTING);
	mutex_exit(&db->db_mtx);

	return (old_user);
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
	return (dmu_buf_replace_user(db_fake, NULL, user));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_user_immediate_evict = TRUE;
	return (dmu_buf_set_user(db_fake, user));
}

void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
	return (dmu_buf_replace_user(db_fake, user, NULL));
}

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	dbuf_verify_user(db, DBVU_NOT_EVICTING);
	return (db->db_user);
}

void
dmu_buf_user_evict_wait()
{
	taskq_wait(dbu_evict_taskq);
}
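/*
 * Illustrative sketch (editor's addition): how a consumer typically
 * attaches private state to a dbuf with the interfaces above.  The
 * structure and callback names are hypothetical:
 *
 *	typedef struct my_state {
 *		dmu_buf_user_t ms_dbu;
 *		dmu_buf_t *ms_db;
 *	} my_state_t;
 *
 *	static void
 *	my_state_evict(void *dbu)
 *	{
 *		kmem_free(dbu, sizeof (my_state_t));
 *	}
 *
 *	ms = kmem_zalloc(sizeof (my_state_t), KM_SLEEP);
 *	ms->ms_db = db;
 *	dmu_buf_init_user(&ms->ms_dbu, my_state_evict, &ms->ms_db);
 *	if (dmu_buf_set_user(db, &ms->ms_dbu) != NULL) {
 *		// lost the race; another user is already attached
 *		kmem_free(ms, sizeof (my_state_t));
 *	}
 *
 * The eviction callback runs asynchronously on dbu_evict_taskq, which
 * is why dmu_buf_user_evict_wait() exists for teardown paths.
 */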
boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_blkptr);
}

static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no blkptrs available in the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			parent = dbuf_hold_level(dn, db->db_level + 1,
			    db->db_blkid >> epbs, db);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/* Indirect block size must match what the dnode thinks it is. */
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	/* Provide the pending dirty record to child dbufs */
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}
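/*
 * Editor's note (not part of the original source): the ordering above
 * matters.  dbuf_write() creates this indirect's zio first, then
 * dbuf_sync_list() recurses into the children, each of which creates
 * its own zio as a child of dr->dr_zio.  Only after every child has
 * been chained in is zio_nowait() called, so the parent cannot issue
 * before its children exist.  Schematically:
 *
 *	dbuf_sync_indirect(L2 dr)
 *	    dbuf_write(L2)             creates the zio for the L2 block
 *	    dbuf_sync_list(children)
 *	        dbuf_sync_indirect(L1 dr)   child zio of the L2 zio
 *	            ...
 *	    zio_nowait(L2 zio)         safe: children already attached
 */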
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after being dirtied.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT0(db->db_level);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns.  We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
		    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
			VERIFY3U(dr->dr_dbuf->db_level, ==, level);
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}

/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT3P(db->db_blkptr, !=, NULL);
	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (bp->blk_birth != 0) {
		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_type) ||
		    (db->db_blkid == DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
		    BP_IS_EMBEDDED(bp));
		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
	}

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(bp)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			if (BP_IS_HOLE(bp)) {
				fill = 0;
			} else {
				fill = 1;
			}
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += BP_GET_FILL(ibp);
		}
	}
	DB_DNODE_EXIT(db);

	if (!BP_IS_EMBEDDED(bp))
		bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	*db->db_blkptr = *bp;
	rw_exit(&dn->dn_struct_rwlock);
}
/* ARGSUSED */
/*
 * This function gets called just prior to running through the compression
 * stage of the zio pipeline.  If we're an indirect block comprised of only
 * holes, then we want this indirect to be compressed away to a hole.  In
 * order to do that we must zero out any information about the holes that
 * this indirect points to before we try to compress it.
 */
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp;
	uint64_t i;
	int epbs;

	ASSERT3U(db->db_level, >, 0);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

	/* Determine if all our children are holes */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}

	/*
	 * If all the children are holes, then zero them all out so that
	 * we may get compressed away.
	 */
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
	}
	DB_DNODE_EXIT(db);
}

/*
 * The SPA will call this callback several times for each zio - once
 * for every physical child i/o (zio->io_phys_children times).  This
 * allows the DMU to monitor the progress of each logical i/o.  For example,
 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
 * block.  There may be a long delay before all copies/fragments are completed,
 * so this callback allows us to retire dirty space gradually, as the physical
 * i/os complete.
 */
/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
	dmu_buf_impl_t *db = arg;
	objset_t *os = db->db_objset;
	dsl_pool_t *dp = dmu_objset_pool(os);
	dbuf_dirty_record_t *dr;
	int delta = 0;

	dr = db->db_data_pending;
	ASSERT3U(dr->dr_txg, ==, zio->io_txg);

	/*
	 * The callback will be called io_phys_children times.  Retire one
	 * portion of our dirty space each time we are called.  Any rounding
	 * error will be cleaned up by dsl_pool_sync()'s call to
	 * dsl_pool_undirty_space().
	 */
	delta = dr->dr_accounted / zio->io_phys_children;
	dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
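/*
 * Editor's note, a worked example of the gradual undirty above (the
 * numbers are illustrative): if a dirty record accounted 131072 bytes
 * (dr_accounted) and the logical write fans out to 3 physical child
 * i/os (say, copies=3), each physdone call retires
 *
 *	131072 / 3 == 43690 bytes (integer division)
 *
 * for a total of 131070; the 2-byte remainder is mopped up by
 * dsl_pool_sync() via dsl_pool_undirty_space(), as the comment notes.
 */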
/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	blkptr_t *bp = db->db_blkptr;
	objset_t *os = db->db_objset;
	dmu_tx_t *tx = os->os_synctx;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db));
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_blkid, <=,
			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context.  We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
		void *contents = (data != NULL) ? data->b_data : NULL;

		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, contents, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to set up the children
		 * ready callback so that we can properly handle an indirect
		 * block that only contains holes.
		 */
		arc_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
		    children_ready_cb,
		    dbuf_write_physdone, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}
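/*
 * Editor's note (not part of the original source): a summary of the
 * three dispatch paths chosen by dbuf_write() above, for reference:
 *
 *	DR_OVERRIDDEN	zio_write() with the already-written bp supplied
 *			via zio_write_override() (dmu_sync()/embedded data);
 *	DB_NOFILL	zio_write() with no data (ZIO_FLAG_NODATA), used
 *			when the contents will not be read back;
 *	otherwise	arc_write() of db_buf, with the ready/physdone/done
 *			callbacks defined earlier in this file.
 *
 * In every case the new zio is parented either to the dirty parent
 * indirect's zio or to the dnode's dn_zio, which is how the bottom-up
 * write ordering described at dbuf_sync_indirect() is enforced.
 */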