/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;
static taskq_t *dbu_evict_taskq;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

static arc_evict_func_t dbuf_do_evict;

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * Invoke the callback from a taskq to avoid lock order reversals
	 * and limit stack depth.
	 */
	taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
	    &dbu->dbu_tqent);
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
	taskq_destroy(dbu_evict_taskq);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			uint64_t *buf = db->db.db_data;
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	db->db_buf = NULL;
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
	if (!arc_released(buf))
		arc_set_callback(buf, dbuf_do_evict, db);
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
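		 *
		 * (Illustrative arithmetic only, with assumed shift values:
		 * if datablkshift = 17 (128K data blocks) and
		 * indblkshift = 14, then with SPA_BLKPTRSHIFT = 7 each
		 * indirect block holds 2^7 = 128 block pointers, so a
		 * level 1 block maps 2^(17 + 7) bytes = 16MB of level 0
		 * data.)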
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		DB_DNODE_EXIT(db);
		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
		    db->db.db_size, db, type));
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_FLAG_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of
 * buffers that have been modified in a previous transaction
 * group, before we modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer, we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_clear_data(db);
	}
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t db_search;
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;

	if (end_blkid > dn->dn_maxblkid && (end_blkid != DMU_SPILL_BLKID))
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	if (start_blkid >= dn->dn_unlisted_l0_blkid) {
		/* There can't be any dbufs in this range; no need to search. */
#ifdef DEBUG
		db = avl_find(&dn->dn_dbufs, &db_search, &where);
		ASSERT3P(db, ==, NULL);
		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
		ASSERT(db == NULL || db->db_level > 0);
#endif
		mutex_exit(&dn->dn_dbufs_mtx);
		return;
	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
		/*
		 * If we are receiving, we expect there to be no dbufs in
		 * the range to be freed, because receive modifies each
		 * block at most once, and in offset order.  If this is
		 * not the case, it can lead to performance problems,
		 * so note that we unexpectedly took the slow path.
		 */
		atomic_inc_64(&zfs_free_range_recv_miss);
	}

	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);
	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);

	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 *
	 * This logic ensures that only block births for
	 * filled blocks are considered.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty && (db->db_blkptr == NULL ||
	    !BP_IS_HOLE(db->db_blkptr))) {
		birth_txg = db->db_last_dirty->dr_txg;
	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
		birth_txg = db->db_blkptr->blk_birth;
	}

	/*
	 * If this block doesn't exist or is in a snapshot, it can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg != 0)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (B_FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

/*
 * We already have a dirty record for this TXG, and we are being
 * dirtied again.
 */
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this buffer has already been written out,
		 * we now need to reset its state.
		 */
		dbuf_unoverride(dr);
		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
		    db->db_state != DB_NOFILL) {
			/* Already released on initial dirty, so just thaw. */
			ASSERT(arc_released(db->db_buf));
			arc_buf_thaw(db->db_buf);
		}
	}
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		dbuf_redirty(dr);
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);

	/*
	 * Due to our use of dn_nlevels below, this can only be called
	 * in open context, unless we are operating on the MOS.
	 * From syncing context, dn_nlevels may be different from the
	 * dn_nlevels used when dbuf was dirtied.
	 */
	ASSERT(db->db_objset ==
	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
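	 *
	 * (dbuf_dirty() keeps the db_last_dirty list sorted newest-txg
	 * first, so the first record with dr_txg <= txg is the only
	 * possible match for this txg.)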
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
	    dr->dr_accounted, txg);

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level + 1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}

	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_clear_data(db);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	/*
	 * Quick check for dirtiness.  For already dirty blocks, this
	 * reduces runtime of this function by >90%, and overall performance
	 * by 50% for some workloads (e.g. file deletion with indirect blocks
	 * cached).
	 */
	mutex_enter(&db->db_mtx);
	dbuf_dirty_record_t *dr;
	for (dr = db->db_last_dirty;
	    dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
		/*
		 * It's possible that it is already dirty but not cached,
		 * because there are some calls to dbuf_dirty() that don't
		 * go through dmu_buf_will_dirty().
		 */
		if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
			/* This dbuf is already dirty and cached. */
			dbuf_redirty(dr);
			mutex_exit(&db->db_mtx);
			return;
		}
	}
	mutex_exit(&db->db_mtx);

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
	struct dirty_leaf *dl;
	dmu_object_type_t type;

	if (etype == BP_EMBEDDED_TYPE_DATA) {
		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
		    SPA_FEATURE_EMBEDDED_DATA));
	}

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	ASSERT0(db->db_level);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	dmu_buf_will_not_fill(dbuf, tx);

	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
	dl = &db->db_last_dirty->dt.dl;
	encode_embedded_bp_compressed(&dl->dr_overridden_by,
	    data, comp, uncompressed_size, compressed_size);
	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
	BP_SET_TYPE(&dl->dr_overridden_by, type);
	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);

	dl->dr_override_state = DR_OVERRIDDEN;
	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller.  Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dmu_buf_fill_done(&db->db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_clear_callback()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 *
 * This routine will dissociate the dbuf from the arc, by calling
 * arc_clear_callback(), but will not evict the data from the ARC.
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	boolean_t dbuf_gone = B_FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		avl_remove(&dn->dn_dbufs, db);
		atomic_dec_32(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_clear_callback(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}

/*
 * Note: While bpp will always be updated if the function returns success,
 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
 * this happens when the dnode is the meta-dnode, or a userused or groupused
 * object.
 */
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}

static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user = NULL;
	db->db_user_immediate_evict = FALSE;
	db->db_freed_in_flight = FALSE;
	db->db_pending_evict = FALSE;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
1 << dn->dn_indblkshift : dn->dn_datablksz; 1900 db->db.db_size = blocksize; 1901 db->db.db_offset = db->db_blkid * blocksize; 1902 } 1903 1904 /* 1905 * Hold the dn_dbufs_mtx while we add the new dbuf 1906 * to the hash table *and* the dn_dbufs list. 1907 * This prevents a possible deadlock with someone 1908 * trying to look up this dbuf before it's added to the 1909 * dn_dbufs list. 1910 */ 1911 mutex_enter(&dn->dn_dbufs_mtx); 1912 db->db_state = DB_EVICTING; 1913 if ((odb = dbuf_hash_insert(db)) != NULL) { 1914 /* someone else inserted it first */ 1915 kmem_cache_free(dbuf_cache, db); 1916 mutex_exit(&dn->dn_dbufs_mtx); 1917 return (odb); 1918 } 1919 avl_add(&dn->dn_dbufs, db); 1920 if (db->db_level == 0 && db->db_blkid >= 1921 dn->dn_unlisted_l0_blkid) 1922 dn->dn_unlisted_l0_blkid = db->db_blkid + 1; 1923 db->db_state = DB_UNCACHED; 1924 mutex_exit(&dn->dn_dbufs_mtx); 1925 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 1926 1927 if (parent && parent != dn->dn_dbuf) 1928 dbuf_add_ref(parent, db); 1929 1930 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1931 refcount_count(&dn->dn_holds) > 0); 1932 (void) refcount_add(&dn->dn_holds, db); 1933 atomic_inc_32(&dn->dn_dbufs_count); 1934 1935 dprintf_dbuf(db, "db=%p\n", db); 1936 1937 return (db); 1938 } 1939 1940 static int 1941 dbuf_do_evict(void *private) 1942 { 1943 dmu_buf_impl_t *db = private; 1944 1945 if (!MUTEX_HELD(&db->db_mtx)) 1946 mutex_enter(&db->db_mtx); 1947 1948 ASSERT(refcount_is_zero(&db->db_holds)); 1949 1950 if (db->db_state != DB_EVICTING) { 1951 ASSERT(db->db_state == DB_CACHED); 1952 DBUF_VERIFY(db); 1953 db->db_buf = NULL; 1954 dbuf_evict(db); 1955 } else { 1956 mutex_exit(&db->db_mtx); 1957 dbuf_destroy(db); 1958 } 1959 return (0); 1960 } 1961 1962 static void 1963 dbuf_destroy(dmu_buf_impl_t *db) 1964 { 1965 ASSERT(refcount_is_zero(&db->db_holds)); 1966 1967 if (db->db_blkid != DMU_BONUS_BLKID) { 1968 /* 1969 * If this dbuf is still on the dn_dbufs list, 1970 * remove it from that list. 1971 */ 1972 if (db->db_dnode_handle != NULL) { 1973 dnode_t *dn; 1974 1975 DB_DNODE_ENTER(db); 1976 dn = DB_DNODE(db); 1977 mutex_enter(&dn->dn_dbufs_mtx); 1978 avl_remove(&dn->dn_dbufs, db); 1979 atomic_dec_32(&dn->dn_dbufs_count); 1980 mutex_exit(&dn->dn_dbufs_mtx); 1981 DB_DNODE_EXIT(db); 1982 /* 1983 * Decrementing the dbuf count means that the hold 1984 * corresponding to the removed dbuf is no longer 1985 * discounted in dnode_move(), so the dnode cannot be 1986 * moved until after we release the hold. 1987 */ 1988 dnode_rele(dn, db); 1989 db->db_dnode_handle = NULL; 1990 } 1991 dbuf_hash_remove(db); 1992 } 1993 db->db_parent = NULL; 1994 db->db_buf = NULL; 1995 1996 ASSERT(db->db.db_data == NULL); 1997 ASSERT(db->db_hash_next == NULL); 1998 ASSERT(db->db_blkptr == NULL); 1999 ASSERT(db->db_data_pending == NULL); 2000 2001 kmem_cache_free(dbuf_cache, db); 2002 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2003 } 2004 2005 typedef struct dbuf_prefetch_arg { 2006 spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2007 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2008 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2009 int dpa_curlevel; /* The current level that we're reading */ 2010 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2011 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2012 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch.
*/ 2013 } dbuf_prefetch_arg_t; 2014 2015 /* 2016 * Actually issue the prefetch read for the block given. 2017 */ 2018 static void 2019 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2020 { 2021 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2022 return; 2023 2024 arc_flags_t aflags = 2025 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2026 2027 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2028 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2029 ASSERT(dpa->dpa_zio != NULL); 2030 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2031 dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2032 &aflags, &dpa->dpa_zb); 2033 } 2034 2035 /* 2036 * Called when an indirect block above our prefetch target is read in. This 2037 * will either read in the next indirect block down the tree or issue the actual 2038 * prefetch if the next block down is our target. 2039 */ 2040 static void 2041 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2042 { 2043 dbuf_prefetch_arg_t *dpa = private; 2044 2045 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2046 ASSERT3S(dpa->dpa_curlevel, >, 0); 2047 if (zio != NULL) { 2048 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2049 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2050 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2051 } 2052 2053 dpa->dpa_curlevel--; 2054 2055 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2056 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2057 blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2058 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2059 if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { 2060 kmem_free(dpa, sizeof (*dpa)); 2061 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2062 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2063 dbuf_issue_final_prefetch(dpa, bp); 2064 kmem_free(dpa, sizeof (*dpa)); 2065 } else { 2066 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2067 zbookmark_phys_t zb; 2068 2069 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2070 2071 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2072 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2073 2074 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2075 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2076 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2077 &iter_aflags, &zb); 2078 } 2079 (void) arc_buf_remove_ref(abuf, private); 2080 } 2081 2082 /* 2083 * Issue prefetch reads for the given block on the given level. If the indirect 2084 * blocks above that block are not in memory, we will read them in 2085 * asynchronously. As a result, this call never blocks waiting for a read to 2086 * complete. 2087 */ 2088 void 2089 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2090 arc_flags_t aflags) 2091 { 2092 blkptr_t bp; 2093 int epbs, nlevels, curlevel; 2094 uint64_t curblkid; 2095 2096 ASSERT(blkid != DMU_BONUS_BLKID); 2097 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2098 2099 if (blkid > dn->dn_maxblkid) 2100 return; 2101 2102 if (dnode_block_freed(dn, blkid)) 2103 return; 2104 2105 /* 2106 * This dnode hasn't been written to disk yet, so there's nothing to 2107 * prefetch. 
2108 */ 2109 nlevels = dn->dn_phys->dn_nlevels; 2110 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2111 return; 2112 2113 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2114 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2115 return; 2116 2117 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2118 level, blkid); 2119 if (db != NULL) { 2120 mutex_exit(&db->db_mtx); 2121 /* 2122 * This dbuf already exists. It is either CACHED, or 2123 * (we assume) about to be read or filled. 2124 */ 2125 return; 2126 } 2127 2128 /* 2129 * Find the closest ancestor (indirect block) of the target block 2130 * that is present in the cache. In this indirect block, we will 2131 * find the bp that is at curlevel, curblkid. 2132 */ 2133 curlevel = level; 2134 curblkid = blkid; 2135 while (curlevel < nlevels - 1) { 2136 int parent_level = curlevel + 1; 2137 uint64_t parent_blkid = curblkid >> epbs; 2138 dmu_buf_impl_t *db; 2139 2140 if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2141 FALSE, TRUE, FTAG, &db) == 0) { 2142 blkptr_t *bpp = db->db_buf->b_data; 2143 bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2144 dbuf_rele(db, FTAG); 2145 break; 2146 } 2147 2148 curlevel = parent_level; 2149 curblkid = parent_blkid; 2150 } 2151 2152 if (curlevel == nlevels - 1) { 2153 /* No cached indirect blocks found. */ 2154 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2155 bp = dn->dn_phys->dn_blkptr[curblkid]; 2156 } 2157 if (BP_IS_HOLE(&bp)) 2158 return; 2159 2160 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2161 2162 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2163 ZIO_FLAG_CANFAIL); 2164 2165 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2166 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2167 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2168 dn->dn_object, level, blkid); 2169 dpa->dpa_curlevel = curlevel; 2170 dpa->dpa_prio = prio; 2171 dpa->dpa_aflags = aflags; 2172 dpa->dpa_spa = dn->dn_objset->os_spa; 2173 dpa->dpa_epbs = epbs; 2174 dpa->dpa_zio = pio; 2175 2176 /* 2177 * If we have the indirect just above us, no need to do the asynchronous 2178 * prefetch chain; we'll just run the last step ourselves. If we're at 2179 * a higher level, though, we want to issue the prefetches for all the 2180 * indirect blocks asynchronously, so we can go on with whatever we were 2181 * doing. 2182 */ 2183 if (curlevel == level) { 2184 ASSERT3U(curblkid, ==, blkid); 2185 dbuf_issue_final_prefetch(dpa, &bp); 2186 kmem_free(dpa, sizeof (*dpa)); 2187 } else { 2188 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2189 zbookmark_phys_t zb; 2190 2191 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2192 dn->dn_object, curlevel, curblkid); 2193 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2194 &bp, dbuf_prefetch_indirect_done, dpa, prio, 2195 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2196 &iter_aflags, &zb); 2197 } 2198 /* 2199 * We use pio here instead of dpa_zio since it's possible that 2200 * dpa may have already been freed. 2201 */ 2202 zio_nowait(pio); 2203 } 2204 2205 /* 2206 * Returns with db_holds incremented, and db_mtx not held. 2207 * Note: dn_struct_rwlock must be held. 
2208 */ 2209 int 2210 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2211 boolean_t fail_sparse, boolean_t fail_uncached, 2212 void *tag, dmu_buf_impl_t **dbp) 2213 { 2214 dmu_buf_impl_t *db, *parent = NULL; 2215 2216 ASSERT(blkid != DMU_BONUS_BLKID); 2217 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2218 ASSERT3U(dn->dn_nlevels, >, level); 2219 2220 *dbp = NULL; 2221 top: 2222 /* dbuf_find() returns with db_mtx held */ 2223 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2224 2225 if (db == NULL) { 2226 blkptr_t *bp = NULL; 2227 int err; 2228 2229 if (fail_uncached) 2230 return (SET_ERROR(ENOENT)); 2231 2232 ASSERT3P(parent, ==, NULL); 2233 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2234 if (fail_sparse) { 2235 if (err == 0 && bp && BP_IS_HOLE(bp)) 2236 err = SET_ERROR(ENOENT); 2237 if (err) { 2238 if (parent) 2239 dbuf_rele(parent, NULL); 2240 return (err); 2241 } 2242 } 2243 if (err && err != ENOENT) 2244 return (err); 2245 db = dbuf_create(dn, level, blkid, parent, bp); 2246 } 2247 2248 if (fail_uncached && db->db_state != DB_CACHED) { 2249 mutex_exit(&db->db_mtx); 2250 return (SET_ERROR(ENOENT)); 2251 } 2252 2253 if (db->db_buf && refcount_is_zero(&db->db_holds)) { 2254 arc_buf_add_ref(db->db_buf, db); 2255 if (db->db_buf->b_data == NULL) { 2256 dbuf_clear(db); 2257 if (parent) { 2258 dbuf_rele(parent, NULL); 2259 parent = NULL; 2260 } 2261 goto top; 2262 } 2263 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2264 } 2265 2266 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2267 2268 /* 2269 * If this buffer is currently syncing out, and we are 2270 * still referencing it from db_data, we need to make a copy 2271 * of it in case we decide we want to dirty it again in this txg. 2272 */ 2273 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2274 dn->dn_object != DMU_META_DNODE_OBJECT && 2275 db->db_state == DB_CACHED && db->db_data_pending) { 2276 dbuf_dirty_record_t *dr = db->db_data_pending; 2277 2278 if (dr->dt.dl.dr_data == db->db_buf) { 2279 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2280 2281 dbuf_set_data(db, 2282 arc_buf_alloc(dn->dn_objset->os_spa, 2283 db->db.db_size, db, type)); 2284 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2285 db->db.db_size); 2286 } 2287 } 2288 2289 (void) refcount_add(&db->db_holds, tag); 2290 DBUF_VERIFY(db); 2291 mutex_exit(&db->db_mtx); 2292 2293 /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2294 if (parent) 2295 dbuf_rele(parent, NULL); 2296 2297 ASSERT3P(DB_DNODE(db), ==, dn); 2298 ASSERT3U(db->db_blkid, ==, blkid); 2299 ASSERT3U(db->db_level, ==, level); 2300 *dbp = db; 2301 2302 return (0); 2303 } 2304 2305 dmu_buf_impl_t * 2306 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2307 { 2308 return (dbuf_hold_level(dn, 0, blkid, tag)); 2309 } 2310 2311 dmu_buf_impl_t * 2312 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2313 { 2314 dmu_buf_impl_t *db; 2315 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2316 return (err ?
NULL : db); 2317 } 2318 2319 void 2320 dbuf_create_bonus(dnode_t *dn) 2321 { 2322 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2323 2324 ASSERT(dn->dn_bonus == NULL); 2325 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 2326 } 2327 2328 int 2329 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 2330 { 2331 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2332 dnode_t *dn; 2333 2334 if (db->db_blkid != DMU_SPILL_BLKID) 2335 return (SET_ERROR(ENOTSUP)); 2336 if (blksz == 0) 2337 blksz = SPA_MINBLOCKSIZE; 2338 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 2339 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 2340 2341 DB_DNODE_ENTER(db); 2342 dn = DB_DNODE(db); 2343 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 2344 dbuf_new_size(db, blksz, tx); 2345 rw_exit(&dn->dn_struct_rwlock); 2346 DB_DNODE_EXIT(db); 2347 2348 return (0); 2349 } 2350 2351 void 2352 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 2353 { 2354 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2355 } 2356 2357 #pragma weak dmu_buf_add_ref = dbuf_add_ref 2358 void 2359 dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2360 { 2361 int64_t holds = refcount_add(&db->db_holds, tag); 2362 ASSERT(holds > 1); 2363 } 2364 2365 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2366 boolean_t 2367 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2368 void *tag) 2369 { 2370 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2371 dmu_buf_impl_t *found_db; 2372 boolean_t result = B_FALSE; 2373 2374 if (db->db_blkid == DMU_BONUS_BLKID) 2375 found_db = dbuf_find_bonus(os, obj); 2376 else 2377 found_db = dbuf_find(os, obj, 0, blkid); 2378 2379 if (found_db != NULL) { 2380 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2381 (void) refcount_add(&db->db_holds, tag); 2382 result = B_TRUE; 2383 } 2384 mutex_exit(&db->db_mtx); 2385 } 2386 return (result); 2387 } 2388 2389 /* 2390 * If you call dbuf_rele() you had better not be referencing the dnode handle 2391 * unless you have some other direct or indirect hold on the dnode. (An indirect 2392 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2393 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2394 * dnode's parent dbuf evicting its dnode handles. 2395 */ 2396 void 2397 dbuf_rele(dmu_buf_impl_t *db, void *tag) 2398 { 2399 mutex_enter(&db->db_mtx); 2400 dbuf_rele_and_unlock(db, tag); 2401 } 2402 2403 void 2404 dmu_buf_rele(dmu_buf_t *db, void *tag) 2405 { 2406 dbuf_rele((dmu_buf_impl_t *)db, tag); 2407 } 2408 2409 /* 2410 * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2411 * db_dirtycnt and db_holds to be updated atomically. 2412 */ 2413 void 2414 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2415 { 2416 int64_t holds; 2417 2418 ASSERT(MUTEX_HELD(&db->db_mtx)); 2419 DBUF_VERIFY(db); 2420 2421 /* 2422 * Remove the reference to the dbuf before removing its hold on the 2423 * dnode so we can guarantee in dnode_move() that a referenced bonus 2424 * buffer has a corresponding dnode hold. 2425 */ 2426 holds = refcount_remove(&db->db_holds, tag); 2427 ASSERT(holds >= 0); 2428 2429 /* 2430 * We can't freeze indirects if there is a possibility that they 2431 * may be modified in the current syncing context. 2432 */ 2433 if (db->db_buf && holds == (db->db_level == 0 ? 
db->db_dirtycnt : 0)) 2434 arc_buf_freeze(db->db_buf); 2435 2436 if (holds == db->db_dirtycnt && 2437 db->db_level == 0 && db->db_user_immediate_evict) 2438 dbuf_evict_user(db); 2439 2440 if (holds == 0) { 2441 if (db->db_blkid == DMU_BONUS_BLKID) { 2442 dnode_t *dn; 2443 boolean_t evict_dbuf = db->db_pending_evict; 2444 2445 /* 2446 * If the dnode moves here, we cannot cross this 2447 * barrier until the move completes. 2448 */ 2449 DB_DNODE_ENTER(db); 2450 2451 dn = DB_DNODE(db); 2452 atomic_dec_32(&dn->dn_dbufs_count); 2453 2454 /* 2455 * Decrementing the dbuf count means that the bonus 2456 * buffer's dnode hold is no longer discounted in 2457 * dnode_move(). The dnode cannot move until after 2458 * the dnode_rele() below. 2459 */ 2460 DB_DNODE_EXIT(db); 2461 2462 /* 2463 * Do not reference db after its lock is dropped. 2464 * Another thread may evict it. 2465 */ 2466 mutex_exit(&db->db_mtx); 2467 2468 if (evict_dbuf) 2469 dnode_evict_bonus(dn); 2470 2471 dnode_rele(dn, db); 2472 } else if (db->db_buf == NULL) { 2473 /* 2474 * This is a special case: we never associated this 2475 * dbuf with any data allocated from the ARC. 2476 */ 2477 ASSERT(db->db_state == DB_UNCACHED || 2478 db->db_state == DB_NOFILL); 2479 dbuf_evict(db); 2480 } else if (arc_released(db->db_buf)) { 2481 arc_buf_t *buf = db->db_buf; 2482 /* 2483 * This dbuf has anonymous data associated with it. 2484 */ 2485 dbuf_clear_data(db); 2486 VERIFY(arc_buf_remove_ref(buf, db)); 2487 dbuf_evict(db); 2488 } else { 2489 VERIFY(!arc_buf_remove_ref(db->db_buf, db)); 2490 2491 /* 2492 * A dbuf will be eligible for eviction if either the 2493 * 'primarycache' property is set or a duplicate 2494 * copy of this buffer is already cached in the arc. 2495 * 2496 * In the case of the 'primarycache' a buffer 2497 * is considered for eviction if it matches the 2498 * criteria set in the property. 2499 * 2500 * To decide if our buffer is considered a 2501 * duplicate, we must call into the arc to determine 2502 * if multiple buffers are referencing the same 2503 * block on-disk. If so, then we simply evict 2504 * ourselves. 
2505 */ 2506 if (!DBUF_IS_CACHEABLE(db)) { 2507 if (db->db_blkptr != NULL && 2508 !BP_IS_HOLE(db->db_blkptr) && 2509 !BP_IS_EMBEDDED(db->db_blkptr)) { 2510 spa_t *spa = 2511 dmu_objset_spa(db->db_objset); 2512 blkptr_t bp = *db->db_blkptr; 2513 dbuf_clear(db); 2514 arc_freed(spa, &bp); 2515 } else { 2516 dbuf_clear(db); 2517 } 2518 } else if (db->db_pending_evict || 2519 arc_buf_eviction_needed(db->db_buf)) { 2520 dbuf_clear(db); 2521 } else { 2522 mutex_exit(&db->db_mtx); 2523 } 2524 } 2525 } else { 2526 mutex_exit(&db->db_mtx); 2527 } 2528 } 2529 2530 #pragma weak dmu_buf_refcount = dbuf_refcount 2531 uint64_t 2532 dbuf_refcount(dmu_buf_impl_t *db) 2533 { 2534 return (refcount_count(&db->db_holds)); 2535 } 2536 2537 void * 2538 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2539 dmu_buf_user_t *new_user) 2540 { 2541 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2542 2543 mutex_enter(&db->db_mtx); 2544 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2545 if (db->db_user == old_user) 2546 db->db_user = new_user; 2547 else 2548 old_user = db->db_user; 2549 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2550 mutex_exit(&db->db_mtx); 2551 2552 return (old_user); 2553 } 2554 2555 void * 2556 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2557 { 2558 return (dmu_buf_replace_user(db_fake, NULL, user)); 2559 } 2560 2561 void * 2562 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2563 { 2564 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2565 2566 db->db_user_immediate_evict = TRUE; 2567 return (dmu_buf_set_user(db_fake, user)); 2568 } 2569 2570 void * 2571 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2572 { 2573 return (dmu_buf_replace_user(db_fake, user, NULL)); 2574 } 2575 2576 void * 2577 dmu_buf_get_user(dmu_buf_t *db_fake) 2578 { 2579 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2580 2581 dbuf_verify_user(db, DBVU_NOT_EVICTING); 2582 return (db->db_user); 2583 } 2584 2585 void 2586 dmu_buf_user_evict_wait() 2587 { 2588 taskq_wait(dbu_evict_taskq); 2589 } 2590 2591 boolean_t 2592 dmu_buf_freeable(dmu_buf_t *dbuf) 2593 { 2594 boolean_t res = B_FALSE; 2595 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 2596 2597 if (db->db_blkptr) 2598 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset, 2599 db->db_blkptr, db->db_blkptr->blk_birth); 2600 2601 return (res); 2602 } 2603 2604 blkptr_t * 2605 dmu_buf_get_blkptr(dmu_buf_t *db) 2606 { 2607 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 2608 return (dbi->db_blkptr); 2609 } 2610 2611 static void 2612 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 2613 { 2614 /* ASSERT(dmu_tx_is_syncing(tx)) */ 2615 ASSERT(MUTEX_HELD(&db->db_mtx)); 2616 2617 if (db->db_blkptr != NULL) 2618 return; 2619 2620 if (db->db_blkid == DMU_SPILL_BLKID) { 2621 db->db_blkptr = &dn->dn_phys->dn_spill; 2622 BP_ZERO(db->db_blkptr); 2623 return; 2624 } 2625 if (db->db_level == dn->dn_phys->dn_nlevels-1) { 2626 /* 2627 * This buffer was allocated at a time when there were 2628 * no blkptrs available from the dnode, or it was 2629 * inappropriate to hook it in (i.e., nlevels mis-match).
2630 */ 2631 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 2632 ASSERT(db->db_parent == NULL); 2633 db->db_parent = dn->dn_dbuf; 2634 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 2635 DBUF_VERIFY(db); 2636 } else { 2637 dmu_buf_impl_t *parent = db->db_parent; 2638 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2639 2640 ASSERT(dn->dn_phys->dn_nlevels > 1); 2641 if (parent == NULL) { 2642 mutex_exit(&db->db_mtx); 2643 rw_enter(&dn->dn_struct_rwlock, RW_READER); 2644 parent = dbuf_hold_level(dn, db->db_level + 1, 2645 db->db_blkid >> epbs, db); 2646 rw_exit(&dn->dn_struct_rwlock); 2647 mutex_enter(&db->db_mtx); 2648 db->db_parent = parent; 2649 } 2650 db->db_blkptr = (blkptr_t *)parent->db.db_data + 2651 (db->db_blkid & ((1ULL << epbs) - 1)); 2652 DBUF_VERIFY(db); 2653 } 2654 } 2655 2656 static void 2657 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2658 { 2659 dmu_buf_impl_t *db = dr->dr_dbuf; 2660 dnode_t *dn; 2661 zio_t *zio; 2662 2663 ASSERT(dmu_tx_is_syncing(tx)); 2664 2665 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2666 2667 mutex_enter(&db->db_mtx); 2668 2669 ASSERT(db->db_level > 0); 2670 DBUF_VERIFY(db); 2671 2672 /* Read the block if it hasn't been read yet. */ 2673 if (db->db_buf == NULL) { 2674 mutex_exit(&db->db_mtx); 2675 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 2676 mutex_enter(&db->db_mtx); 2677 } 2678 ASSERT3U(db->db_state, ==, DB_CACHED); 2679 ASSERT(db->db_buf != NULL); 2680 2681 DB_DNODE_ENTER(db); 2682 dn = DB_DNODE(db); 2683 /* Indirect block size must match what the dnode thinks it is. */ 2684 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 2685 dbuf_check_blkptr(dn, db); 2686 DB_DNODE_EXIT(db); 2687 2688 /* Provide the pending dirty record to child dbufs */ 2689 db->db_data_pending = dr; 2690 2691 mutex_exit(&db->db_mtx); 2692 dbuf_write(dr, db->db_buf, tx); 2693 2694 zio = dr->dr_zio; 2695 mutex_enter(&dr->dt.di.dr_mtx); 2696 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 2697 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 2698 mutex_exit(&dr->dt.di.dr_mtx); 2699 zio_nowait(zio); 2700 } 2701 2702 static void 2703 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2704 { 2705 arc_buf_t **datap = &dr->dt.dl.dr_data; 2706 dmu_buf_impl_t *db = dr->dr_dbuf; 2707 dnode_t *dn; 2708 objset_t *os; 2709 uint64_t txg = tx->tx_txg; 2710 2711 ASSERT(dmu_tx_is_syncing(tx)); 2712 2713 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2714 2715 mutex_enter(&db->db_mtx); 2716 /* 2717 * To be synced, we must be dirtied. But we 2718 * might have been freed after the dirty. 2719 */ 2720 if (db->db_state == DB_UNCACHED) { 2721 /* This buffer has been freed since it was dirtied */ 2722 ASSERT(db->db.db_data == NULL); 2723 } else if (db->db_state == DB_FILL) { 2724 /* This buffer was freed and is now being re-filled */ 2725 ASSERT(db->db.db_data != dr->dt.dl.dr_data); 2726 } else { 2727 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 2728 } 2729 DBUF_VERIFY(db); 2730 2731 DB_DNODE_ENTER(db); 2732 dn = DB_DNODE(db); 2733 2734 if (db->db_blkid == DMU_SPILL_BLKID) { 2735 mutex_enter(&dn->dn_mtx); 2736 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 2737 mutex_exit(&dn->dn_mtx); 2738 } 2739 2740 /* 2741 * If this is a bonus buffer, simply copy the bonus data into the 2742 * dnode. It will be written out when the dnode is synced (and it 2743 * will be synced, since it must have been dirty for dbuf_sync to 2744 * be called). 
2745 */ 2746 if (db->db_blkid == DMU_BONUS_BLKID) { 2747 dbuf_dirty_record_t **drp; 2748 2749 ASSERT(*datap != NULL); 2750 ASSERT0(db->db_level); 2751 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); 2752 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); 2753 DB_DNODE_EXIT(db); 2754 2755 if (*datap != db->db.db_data) { 2756 zio_buf_free(*datap, DN_MAX_BONUSLEN); 2757 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2758 } 2759 db->db_data_pending = NULL; 2760 drp = &db->db_last_dirty; 2761 while (*drp != dr) 2762 drp = &(*drp)->dr_next; 2763 ASSERT(dr->dr_next == NULL); 2764 ASSERT(dr->dr_dbuf == db); 2765 *drp = dr->dr_next; 2766 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2767 ASSERT(db->db_dirtycnt > 0); 2768 db->db_dirtycnt -= 1; 2769 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); 2770 return; 2771 } 2772 2773 os = dn->dn_objset; 2774 2775 /* 2776 * This function may have dropped the db_mtx lock allowing a dmu_sync 2777 * operation to sneak in. As a result, we need to ensure that we 2778 * don't check the dr_override_state until we have returned from 2779 * dbuf_check_blkptr. 2780 */ 2781 dbuf_check_blkptr(dn, db); 2782 2783 /* 2784 * If this buffer is in the middle of an immediate write, 2785 * wait for the synchronous IO to complete. 2786 */ 2787 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 2788 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 2789 cv_wait(&db->db_changed, &db->db_mtx); 2790 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 2791 } 2792 2793 if (db->db_state != DB_NOFILL && 2794 dn->dn_object != DMU_META_DNODE_OBJECT && 2795 refcount_count(&db->db_holds) > 1 && 2796 dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 2797 *datap == db->db_buf) { 2798 /* 2799 * If this buffer is currently "in use" (i.e., there 2800 * are active holds and db_data still references it), 2801 * then make a copy before we start the write so that 2802 * any modifications from the open txg will not leak 2803 * into this write. 2804 * 2805 * NOTE: this copy does not need to be made for 2806 * objects only modified in the syncing context (e.g. 2807 * DMU_OT_DNODE blocks). 2808 */ 2809 int blksz = arc_buf_size(*datap); 2810 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2811 *datap = arc_buf_alloc(os->os_spa, blksz, db, type); 2812 bcopy(db->db.db_data, (*datap)->b_data, blksz); 2813 } 2814 db->db_data_pending = dr; 2815 2816 mutex_exit(&db->db_mtx); 2817 2818 dbuf_write(dr, *datap, tx); 2819 2820 ASSERT(!list_link_active(&dr->dr_dirty_node)); 2821 if (dn->dn_object == DMU_META_DNODE_OBJECT) { 2822 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 2823 DB_DNODE_EXIT(db); 2824 } else { 2825 /* 2826 * Although zio_nowait() does not "wait for an IO", it does 2827 * initiate the IO. If this is an empty write, it seems plausible 2828 * that the IO could actually be completed before the nowait 2829 * returns. We need to DB_DNODE_EXIT() first in case 2830 * zio_nowait() invalidates the dbuf. 2831 */ 2832 DB_DNODE_EXIT(db); 2833 zio_nowait(dr->dr_zio); 2834 } 2835 } 2836 2837 void 2838 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 2839 { 2840 dbuf_dirty_record_t *dr; 2841 2842 while (dr = list_head(list)) { 2843 if (dr->dr_zio != NULL) { 2844 /* 2845 * If we find an already initialized zio then we 2846 * are processing the meta-dnode, and we have finished. 2847 * The dbufs for all dnodes are put back on the list 2848 * during processing, so that we can zio_wait() 2849 * these IOs after initiating all child IOs.
2850 */ 2851 ASSERT3U(dr->dr_dbuf->db.db_object, ==, 2852 DMU_META_DNODE_OBJECT); 2853 break; 2854 } 2855 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 2856 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 2857 VERIFY3U(dr->dr_dbuf->db_level, ==, level); 2858 } 2859 list_remove(list, dr); 2860 if (dr->dr_dbuf->db_level > 0) 2861 dbuf_sync_indirect(dr, tx); 2862 else 2863 dbuf_sync_leaf(dr, tx); 2864 } 2865 } 2866 2867 /* ARGSUSED */ 2868 static void 2869 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 2870 { 2871 dmu_buf_impl_t *db = vdb; 2872 dnode_t *dn; 2873 blkptr_t *bp = zio->io_bp; 2874 blkptr_t *bp_orig = &zio->io_bp_orig; 2875 spa_t *spa = zio->io_spa; 2876 int64_t delta; 2877 uint64_t fill = 0; 2878 int i; 2879 2880 ASSERT3P(db->db_blkptr, ==, bp); 2881 2882 DB_DNODE_ENTER(db); 2883 dn = DB_DNODE(db); 2884 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 2885 dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 2886 zio->io_prev_space_delta = delta; 2887 2888 if (bp->blk_birth != 0) { 2889 ASSERT((db->db_blkid != DMU_SPILL_BLKID && 2890 BP_GET_TYPE(bp) == dn->dn_type) || 2891 (db->db_blkid == DMU_SPILL_BLKID && 2892 BP_GET_TYPE(bp) == dn->dn_bonustype) || 2893 BP_IS_EMBEDDED(bp)); 2894 ASSERT(BP_GET_LEVEL(bp) == db->db_level); 2895 } 2896 2897 mutex_enter(&db->db_mtx); 2898 2899 #ifdef ZFS_DEBUG 2900 if (db->db_blkid == DMU_SPILL_BLKID) { 2901 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 2902 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 2903 db->db_blkptr == &dn->dn_phys->dn_spill); 2904 } 2905 #endif 2906 2907 if (db->db_level == 0) { 2908 mutex_enter(&dn->dn_mtx); 2909 if (db->db_blkid > dn->dn_phys->dn_maxblkid && 2910 db->db_blkid != DMU_SPILL_BLKID) 2911 dn->dn_phys->dn_maxblkid = db->db_blkid; 2912 mutex_exit(&dn->dn_mtx); 2913 2914 if (dn->dn_type == DMU_OT_DNODE) { 2915 dnode_phys_t *dnp = db->db.db_data; 2916 for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 2917 i--, dnp++) { 2918 if (dnp->dn_type != DMU_OT_NONE) 2919 fill++; 2920 } 2921 } else { 2922 if (BP_IS_HOLE(bp)) { 2923 fill = 0; 2924 } else { 2925 fill = 1; 2926 } 2927 } 2928 } else { 2929 blkptr_t *ibp = db->db.db_data; 2930 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 2931 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 2932 if (BP_IS_HOLE(ibp)) 2933 continue; 2934 fill += BP_GET_FILL(ibp); 2935 } 2936 } 2937 DB_DNODE_EXIT(db); 2938 2939 if (!BP_IS_EMBEDDED(bp)) 2940 bp->blk_fill = fill; 2941 2942 mutex_exit(&db->db_mtx); 2943 } 2944 2945 /* 2946 * The SPA will call this callback several times for each zio - once 2947 * for every physical child i/o (zio->io_phys_children times). This 2948 * allows the DMU to monitor the progress of each logical i/o. For example, 2949 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 2950 * block. There may be a long delay before all copies/fragments are completed, 2951 * so this callback allows us to retire dirty space gradually, as the physical 2952 * i/os complete. 2953 */ 2954 /* ARGSUSED */ 2955 static void 2956 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 2957 { 2958 dmu_buf_impl_t *db = arg; 2959 objset_t *os = db->db_objset; 2960 dsl_pool_t *dp = dmu_objset_pool(os); 2961 dbuf_dirty_record_t *dr; 2962 int delta = 0; 2963 2964 dr = db->db_data_pending; 2965 ASSERT3U(dr->dr_txg, ==, zio->io_txg); 2966 2967 /* 2968 * The callback will be called io_phys_children times. Retire one 2969 * portion of our dirty space each time we are called. 
Any rounding 2970 * error will be cleaned up by dsl_pool_sync()'s call to 2971 * dsl_pool_undirty_space(). 2972 */ 2973 delta = dr->dr_accounted / zio->io_phys_children; 2974 dsl_pool_undirty_space(dp, delta, zio->io_txg); 2975 } 2976 2977 /* ARGSUSED */ 2978 static void 2979 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 2980 { 2981 dmu_buf_impl_t *db = vdb; 2982 blkptr_t *bp_orig = &zio->io_bp_orig; 2983 blkptr_t *bp = db->db_blkptr; 2984 objset_t *os = db->db_objset; 2985 dmu_tx_t *tx = os->os_synctx; 2986 dbuf_dirty_record_t **drp, *dr; 2987 2988 ASSERT0(zio->io_error); 2989 ASSERT(db->db_blkptr == bp); 2990 2991 /* 2992 * For nopwrites and rewrites we ensure that the bp matches our 2993 * original and bypass all the accounting. 2994 */ 2995 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 2996 ASSERT(BP_EQUAL(bp, bp_orig)); 2997 } else { 2998 dsl_dataset_t *ds = os->os_dsl_dataset; 2999 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3000 dsl_dataset_block_born(ds, bp, tx); 3001 } 3002 3003 mutex_enter(&db->db_mtx); 3004 3005 DBUF_VERIFY(db); 3006 3007 drp = &db->db_last_dirty; 3008 while ((dr = *drp) != db->db_data_pending) 3009 drp = &dr->dr_next; 3010 ASSERT(!list_link_active(&dr->dr_dirty_node)); 3011 ASSERT(dr->dr_dbuf == db); 3012 ASSERT(dr->dr_next == NULL); 3013 *drp = dr->dr_next; 3014 3015 #ifdef ZFS_DEBUG 3016 if (db->db_blkid == DMU_SPILL_BLKID) { 3017 dnode_t *dn; 3018 3019 DB_DNODE_ENTER(db); 3020 dn = DB_DNODE(db); 3021 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 3022 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 3023 db->db_blkptr == &dn->dn_phys->dn_spill); 3024 DB_DNODE_EXIT(db); 3025 } 3026 #endif 3027 3028 if (db->db_level == 0) { 3029 ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3030 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 3031 if (db->db_state != DB_NOFILL) { 3032 if (dr->dt.dl.dr_data != db->db_buf) 3033 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, 3034 db)); 3035 else if (!arc_released(db->db_buf)) 3036 arc_set_callback(db->db_buf, dbuf_do_evict, db); 3037 } 3038 } else { 3039 dnode_t *dn; 3040 3041 DB_DNODE_ENTER(db); 3042 dn = DB_DNODE(db); 3043 ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3044 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3045 if (!BP_IS_HOLE(db->db_blkptr)) { 3046 int epbs = 3047 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3048 ASSERT3U(db->db_blkid, <=, 3049 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3050 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3051 db->db.db_size); 3052 if (!arc_released(db->db_buf)) 3053 arc_set_callback(db->db_buf, dbuf_do_evict, db); 3054 } 3055 DB_DNODE_EXIT(db); 3056 mutex_destroy(&dr->dt.di.dr_mtx); 3057 list_destroy(&dr->dt.di.dr_children); 3058 } 3059 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3060 3061 cv_broadcast(&db->db_changed); 3062 ASSERT(db->db_dirtycnt > 0); 3063 db->db_dirtycnt -= 1; 3064 db->db_data_pending = NULL; 3065 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); 3066 } 3067 3068 static void 3069 dbuf_write_nofill_ready(zio_t *zio) 3070 { 3071 dbuf_write_ready(zio, NULL, zio->io_private); 3072 } 3073 3074 static void 3075 dbuf_write_nofill_done(zio_t *zio) 3076 { 3077 dbuf_write_done(zio, NULL, zio->io_private); 3078 } 3079 3080 static void 3081 dbuf_write_override_ready(zio_t *zio) 3082 { 3083 dbuf_dirty_record_t *dr = zio->io_private; 3084 dmu_buf_impl_t *db = dr->dr_dbuf; 3085 3086 dbuf_write_ready(zio, NULL, db); 3087 } 3088 3089 static void 3090 dbuf_write_override_done(zio_t *zio) 3091 { 3092 
dbuf_dirty_record_t *dr = zio->io_private; 3093 dmu_buf_impl_t *db = dr->dr_dbuf; 3094 blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3095 3096 mutex_enter(&db->db_mtx); 3097 if (!BP_EQUAL(zio->io_bp, obp)) { 3098 if (!BP_IS_HOLE(obp)) 3099 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3100 arc_release(dr->dt.dl.dr_data, db); 3101 } 3102 mutex_exit(&db->db_mtx); 3103 3104 dbuf_write_done(zio, NULL, db); 3105 } 3106 3107 /* Issue I/O to commit a dirty buffer to disk. */ 3108 static void 3109 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 3110 { 3111 dmu_buf_impl_t *db = dr->dr_dbuf; 3112 dnode_t *dn; 3113 objset_t *os; 3114 dmu_buf_impl_t *parent = db->db_parent; 3115 uint64_t txg = tx->tx_txg; 3116 zbookmark_phys_t zb; 3117 zio_prop_t zp; 3118 zio_t *zio; 3119 int wp_flag = 0; 3120 3121 DB_DNODE_ENTER(db); 3122 dn = DB_DNODE(db); 3123 os = dn->dn_objset; 3124 3125 if (db->db_state != DB_NOFILL) { 3126 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 3127 /* 3128 * Private object buffers are released here rather 3129 * than in dbuf_dirty() since they are only modified 3130 * in the syncing context and we don't want the 3131 * overhead of making multiple copies of the data. 3132 */ 3133 if (BP_IS_HOLE(db->db_blkptr)) { 3134 arc_buf_thaw(data); 3135 } else { 3136 dbuf_release_bp(db); 3137 } 3138 } 3139 } 3140 3141 if (parent != dn->dn_dbuf) { 3142 /* Our parent is an indirect block. */ 3143 /* We have a dirty parent that has been scheduled for write. */ 3144 ASSERT(parent && parent->db_data_pending); 3145 /* Our parent's buffer is one level closer to the dnode. */ 3146 ASSERT(db->db_level == parent->db_level-1); 3147 /* 3148 * We're about to modify our parent's db_data by modifying 3149 * our block pointer, so the parent must be released. 3150 */ 3151 ASSERT(arc_released(parent->db_buf)); 3152 zio = parent->db_data_pending->dr_zio; 3153 } else { 3154 /* Our parent is the dnode itself. */ 3155 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 3156 db->db_blkid != DMU_SPILL_BLKID) || 3157 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 3158 if (db->db_blkid != DMU_SPILL_BLKID) 3159 ASSERT3P(db->db_blkptr, ==, 3160 &dn->dn_phys->dn_blkptr[db->db_blkid]); 3161 zio = dn->dn_zio; 3162 } 3163 3164 ASSERT(db->db_level == 0 || data == db->db_buf); 3165 ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 3166 ASSERT(zio); 3167 3168 SET_BOOKMARK(&zb, os->os_dsl_dataset ? 3169 os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 3170 db->db.db_object, db->db_level, db->db_blkid); 3171 3172 if (db->db_blkid == DMU_SPILL_BLKID) 3173 wp_flag = WP_SPILL; 3174 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 3175 3176 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 3177 DB_DNODE_EXIT(db); 3178 3179 if (db->db_level == 0 && 3180 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 3181 /* 3182 * The BP for this block has been provided by open context 3183 * (by dmu_sync() or dmu_buf_write_embedded()). 3184 */ 3185 void *contents = (data != NULL) ? 
data->b_data : NULL; 3186 3187 dr->dr_zio = zio_write(zio, os->os_spa, txg, 3188 db->db_blkptr, contents, db->db.db_size, &zp, 3189 dbuf_write_override_ready, NULL, dbuf_write_override_done, 3190 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3191 mutex_enter(&db->db_mtx); 3192 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 3193 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 3194 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 3195 mutex_exit(&db->db_mtx); 3196 } else if (db->db_state == DB_NOFILL) { 3197 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 3198 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 3199 dr->dr_zio = zio_write(zio, os->os_spa, txg, 3200 db->db_blkptr, NULL, db->db.db_size, &zp, 3201 dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db, 3202 ZIO_PRIORITY_ASYNC_WRITE, 3203 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 3204 } else { 3205 ASSERT(arc_released(data)); 3206 dr->dr_zio = arc_write(zio, os->os_spa, txg, 3207 db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), 3208 DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready, 3209 dbuf_write_physdone, dbuf_write_done, db, 3210 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3211 } 3212 } 3213