/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>

static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
		ASSERT3P(child->db_dnode, ==, dn);
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

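/*
 * Free the "num" block pointers starting at "bp": each non-hole bp is
 * killed via dsl_dataset_block_kill() and then zeroed, and the dnode's
 * space accounting is reduced by the bytes freed.  Returns the number
 * of non-hole block pointers freed.
 */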
static int
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;
	int i, blocks_freed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, dn->dn_zio, tx);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		bzero(bp, sizeof (blkptr_t));
		blocks_freed += 1;
	}
	dnode_diduse_space(dn, -bytesfreed);
	return (blocks_freed);
}

#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
}
#endif

#define	ALL -1

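/*
 * Recursively free the blocks in the range [blkid, blkid + nblks) that
 * fall under the indirect dbuf "db".  Returns ALL (-1) if every block
 * pointer under db was freed, so the caller may free db's own block
 * pointer as well; otherwise returns the number of block pointers in
 * db that were freed.
 *
 * For scale (example values, not requirements): with 16K indirect
 * blocks (dn_indblkshift == 14), each indirect block holds
 * 1 << (14 - SPA_BLKPTRSHIFT) == 128 block pointers, so a level-2 dbuf
 * covers 128 * 128 == 16384 level-0 block ids.
 */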
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int all = TRUE;
	int blocks_freed = 0;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if we didn't get a dirty hold (because this block had just
	 *       finished being written -- and so had no holds), and then
	 *       this block got evicted before we got here.
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		blocks_freed = free_blocks(dn, bp, end-start+1, tx);
		arc_buf_freeze(db->db_buf);
		ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
		return (all ? ALL : blocks_freed);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx) == ALL) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			blocks_freed += free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
	arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
	return (all ? ALL : blocks_freed);
}

/*
 * free_range: Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		(void) free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
		}
		return;
	}

	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx) == ALL) {
			ASSERT3P(db->db_blkptr, ==, bp);
			(void) free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);
			ASSERT3P(db->db_dnode, ==, dn);

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}

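/*
 * Throw away the dirty records on "list" without writing them out:
 * clear each dbuf's dirty state (recursing through the children of
 * indirect dbufs), free the record, and drop the hold taken when the
 * dbuf was dirtied.
 */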
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DB_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
			mutex_exit(&db->db_mtx);
		} else {
			mutex_exit(&db->db_mtx);
			dnode_undirty_dbufs(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele(db, (void *)(uintptr_t)txg);
	}
}

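/*
 * Sync-context teardown of a dnode that was freed in this txg: discard
 * any remaining dirty records, evict the dnode's dbufs, zero the
 * dnode_phys_t, and release this txg's hold so the dnode may be
 * evicted.
 */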
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	free_range_t *rp;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff]) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	while (rp = avl_last(&dn->dn_ranges[txgoff])) {
		dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx);
		/* grab the mutex so we don't race with dnode_block_freed() */
		mutex_enter(&dn->dn_mtx);
		avl_remove(&dn->dn_ranges[txgoff], rp);
		mutex_exit(&dn->dn_mtx);
		kmem_free(rp, sizeof (free_range_t));
	}

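	/*
	 * If the dnode is being freed in this txg, tear down its in-core
	 * and on-disk state now; dnode_sync_free() drops this txg's hold
	 * on the dnode for us.
	 */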
	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (dn->dn_object != DMU_META_DNODE_OBJECT) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}