/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

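/*
 * Increase the dnode's indirection level to the level recorded in
 * dn_next_nlevels for this txg: copy the existing block pointers into
 * a new top-level indirect block and reparent any cached child dbufs
 * onto it.  Note that dbuf_find() returns with the child's db_mtx held,
 * which is why each iteration of the reparenting loop below ends with a
 * mutex_exit().
 */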
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db;
        int txgoff = tx->tx_txg & TXG_MASK;
        int nblkptr = dn->dn_phys->dn_nblkptr;
        int old_toplvl = dn->dn_phys->dn_nlevels - 1;
        int new_level = dn->dn_next_nlevels[txgoff];
        int i;

        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

        /* this dnode can't be paged out because it's dirty */
        ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
        ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

        db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
        ASSERT(db != NULL);

        dn->dn_phys->dn_nlevels = new_level;
        dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
            dn->dn_object, dn->dn_phys->dn_nlevels);

        /* transfer dnode's block pointers to new indirect block */
        (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
        ASSERT(db->db.db_data);
        ASSERT(arc_released(db->db_buf));
        ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
        bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
            sizeof (blkptr_t) * nblkptr);
        arc_buf_freeze(db->db_buf);

        /* set dbuf's parent pointers to new indirect buf */
        for (i = 0; i < nblkptr; i++) {
                dmu_buf_impl_t *child =
                    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

                if (child == NULL)
                        continue;
#ifdef DEBUG
                DB_DNODE_ENTER(child);
                ASSERT3P(DB_DNODE(child), ==, dn);
                DB_DNODE_EXIT(child);
#endif  /* DEBUG */
                if (child->db_parent && child->db_parent != dn->dn_dbuf) {
                        ASSERT(child->db_parent->db_level == db->db_level);
                        ASSERT(child->db_blkptr !=
                            &dn->dn_phys->dn_blkptr[child->db_blkid]);
                        mutex_exit(&child->db_mtx);
                        continue;
                }
                ASSERT(child->db_parent == NULL ||
                    child->db_parent == dn->dn_dbuf);

                child->db_parent = db;
                dbuf_add_ref(db, child);
                if (db->db.db_data)
                        child->db_blkptr = (blkptr_t *)db->db.db_data + i;
                else
                        child->db_blkptr = NULL;
                dprintf_dbuf_bp(child, child->db_blkptr,
                    "changed db_blkptr to new indirect %s", "");

                mutex_exit(&child->db_mtx);
        }

        bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

        dbuf_rele(db, FTAG);

        rw_exit(&dn->dn_struct_rwlock);
}

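/*
 * Free the block pointers in bp[0 .. num-1] and charge the freed bytes
 * back against the dnode.  The freed pointers are normally zeroed, but
 * when the hole_birth feature is active each resulting hole retains the
 * block's logical size, type, and indirection level, plus the txg in
 * which it was punched, so zfs send can tell when holes were created.
 */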
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
        uint64_t bytesfreed = 0;

        dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

        for (int i = 0; i < num; i++, bp++) {
                if (BP_IS_HOLE(bp))
                        continue;

                bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
                ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

                /*
                 * Save some useful information on the holes being
                 * punched, including logical size, type, and indirection
                 * level.  Retaining the birth time enables detection of
                 * when holes were punched, which reduces the number of
                 * free records transmitted during a zfs send.
                 */

                uint64_t lsize = BP_GET_LSIZE(bp);
                dmu_object_type_t type = BP_GET_TYPE(bp);
                uint64_t lvl = BP_GET_LEVEL(bp);

                bzero(bp, sizeof (blkptr_t));

                if (spa_feature_is_active(dn->dn_objset->os_spa,
                    SPA_FEATURE_HOLE_BIRTH)) {
                        BP_SET_LSIZE(bp, lsize);
                        BP_SET_TYPE(bp, type);
                        BP_SET_LEVEL(bp, lvl);
                        BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
                }
        }
        dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
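/*
 * Verify that the block pointers in the range [start, end] of this
 * level-1 indirect block point to freed data: both the data from any
 * dirty record in this txg and the cached buffer itself (unless it is
 * dirty in a future txg) must be entirely zero.
 */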
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
        int off, num;
        int i, err, epbs;
        uint64_t txg = tx->tx_txg;
        dnode_t *dn;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
        off = start - (db->db_blkid << epbs);
        num = end - start + 1;

        ASSERT3U(off, >=, 0);
        ASSERT3U(num, >=, 0);
        ASSERT3U(db->db_level, >, 0);
        ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
        ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
        ASSERT(db->db_blkptr != NULL);

        for (i = off; i < off+num; i++) {
                uint64_t *buf;
                dmu_buf_impl_t *child;
                dbuf_dirty_record_t *dr;
                int j;

                ASSERT(db->db_level == 1);

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                err = dbuf_hold_impl(dn, db->db_level-1,
                    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
                rw_exit(&dn->dn_struct_rwlock);
                if (err == ENOENT)
                        continue;
                ASSERT(err == 0);
                ASSERT(child->db_level == 0);
                dr = child->db_last_dirty;
                while (dr && dr->dr_txg > txg)
                        dr = dr->dr_next;
                ASSERT(dr == NULL || dr->dr_txg == txg);

                /* data_old better be zeroed */
                if (dr) {
                        buf = dr->dt.dl.dr_data->b_data;
                        for (j = 0; j < child->db.db_size >> 3; j++) {
                                if (buf[j] != 0) {
                                        panic("freed data not zero: "
                                            "child=%p i=%d off=%d num=%d\n",
                                            (void *)child, i, off, num);
                                }
                        }
                }

                /*
                 * db_data better be zeroed unless it's dirty in a
                 * future txg.
                 */
                mutex_enter(&child->db_mtx);
                buf = child->db.db_data;
                if (buf != NULL && child->db_state != DB_FILL &&
                    child->db_last_dirty == NULL) {
                        for (j = 0; j < child->db.db_size >> 3; j++) {
                                if (buf[j] != 0) {
                                        panic("freed data not zero: "
                                            "child=%p i=%d off=%d num=%d\n",
                                            (void *)child, i, off, num);
                                }
                        }
                }
                mutex_exit(&child->db_mtx);

                dbuf_rele(child, FTAG);
        }
        DB_DNODE_EXIT(db);
}
#endif

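/*
 * Free the blocks in the range [blkid, blkid + nblks) that are covered
 * by this indirect block, recursing into lower-level indirect blocks as
 * needed.  If this leaves the block holding nothing but holes, free the
 * block itself too; otherwise it stays dirty and will be written out.
 */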
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
        dnode_t *dn;
        blkptr_t *bp;
        dmu_buf_impl_t *subdb;
        uint64_t start, end, dbstart, dbend, i;
        int epbs, shift;

        /*
         * There is a small possibility that this block will not be cached:
         *   1 - if level > 1 and there are no children with level <= 1
         *   2 - if this block was evicted since we read it from
         *       dmu_tx_hold_free().
         */
        if (db->db_state != DB_CACHED)
                (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

        dbuf_release_bp(db);
        bp = db->db.db_data;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
        shift = (db->db_level - 1) * epbs;
        dbstart = db->db_blkid << epbs;
        start = blkid >> shift;
        if (dbstart < start) {
                bp += start - dbstart;
        } else {
                start = dbstart;
        }
        dbend = ((db->db_blkid + 1) << epbs) - 1;
        end = (blkid + nblks - 1) >> shift;
        if (dbend <= end)
                end = dbend;

        ASSERT3U(start, <=, end);

        if (db->db_level == 1) {
                FREE_VERIFY(db, start, end, tx);
                free_blocks(dn, bp, end - start + 1, tx);
        } else {
                for (i = start; i <= end; i++, bp++) {
                        if (BP_IS_HOLE(bp))
                                continue;
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
                            i, TRUE, FALSE, FTAG, &subdb));
                        rw_exit(&dn->dn_struct_rwlock);
                        ASSERT3P(bp, ==, subdb->db_blkptr);

                        free_children(subdb, blkid, nblks, tx);
                        dbuf_rele(subdb, FTAG);
                }
        }

        /* If this whole block is free, free ourselves too. */
        for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
                if (!BP_IS_HOLE(bp))
                        break;
        }
        if (i == 1 << epbs) {
                /* didn't find any non-holes */
                bzero(db->db.db_data, db->db.db_size);
                free_blocks(dn, db->db_blkptr, 1, tx);
        } else {
                /*
                 * Partial block free; must be marked dirty so that it
                 * will be written out.
                 */
                ASSERT(db->db_dirtycnt > 0);
        }

        DB_DNODE_EXIT(db);
        arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
        blkptr_t *bp = dn->dn_phys->dn_blkptr;
        int dnlevel = dn->dn_phys->dn_nlevels;
        boolean_t trunc = B_FALSE;

        if (blkid > dn->dn_phys->dn_maxblkid)
                return;

        ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
        if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
                nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
                trunc = B_TRUE;
        }

        /* There are no indirect blocks in the object */
        if (dnlevel == 1) {
                if (blkid >= dn->dn_phys->dn_nblkptr) {
                        /* this range was never made persistent */
                        return;
                }
                ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
                free_blocks(dn, bp + blkid, nblks, tx);
        } else {
                int shift = (dnlevel - 1) *
                    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
                int start = blkid >> shift;
                int end = (blkid + nblks - 1) >> shift;
                dmu_buf_impl_t *db;

                ASSERT(start < dn->dn_phys->dn_nblkptr);
                bp += start;
                for (int i = start; i <= end; i++, bp++) {
                        if (BP_IS_HOLE(bp))
                                continue;
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
                            TRUE, FALSE, FTAG, &db));
                        rw_exit(&dn->dn_struct_rwlock);

                        free_children(db, blkid, nblks, tx);
                        dbuf_rele(db, FTAG);
                }
        }

        if (trunc) {
                dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

                uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
                    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
                ASSERT(off < dn->dn_phys->dn_maxblkid ||
                    dn->dn_phys->dn_maxblkid == 0 ||
                    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
        }
}

typedef struct dnode_sync_free_range_arg {
        dnode_t *dsfra_dnode;
        dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

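/*
 * range_tree_vacate() callback.  The caller holds dn_mtx across the
 * vacate, so drop it around the actual free and reacquire it before
 * returning.
 */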
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
        dnode_sync_free_range_arg_t *dsfra = arg;
        dnode_t *dn = dsfra->dsfra_dnode;

        mutex_exit(&dn->dn_mtx);
        dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
        mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
        dmu_buf_impl_t db_marker;
        dmu_buf_impl_t *db, *db_next;

        mutex_enter(&dn->dn_dbufs_mtx);
        for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef DEBUG
                DB_DNODE_ENTER(db);
                ASSERT3P(DB_DNODE(db), ==, dn);
                DB_DNODE_EXIT(db);
#endif  /* DEBUG */

                mutex_enter(&db->db_mtx);
                if (db->db_state != DB_EVICTING &&
                    refcount_is_zero(&db->db_holds)) {
                        db_marker.db_level = db->db_level;
                        db_marker.db_blkid = db->db_blkid;
                        db_marker.db_state = DB_SEARCH;
                        avl_insert_here(&dn->dn_dbufs, &db_marker, db,
                            AVL_BEFORE);

                        dbuf_destroy(db);

                        db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
                        avl_remove(&dn->dn_dbufs, &db_marker);
                } else {
                        db->db_pending_evict = TRUE;
                        mutex_exit(&db->db_mtx);
                        db_next = AVL_NEXT(&dn->dn_dbufs, db);
                }
        }
        mutex_exit(&dn->dn_dbufs_mtx);

        dnode_evict_bonus(dn);
}

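/*
 * Evict the dnode's bonus buffer if it has no holds; otherwise just
 * mark it for eviction once the last hold is released.
 */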
void
dnode_evict_bonus(dnode_t *dn)
{
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        if (dn->dn_bonus != NULL) {
                if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
                        mutex_enter(&dn->dn_bonus->db_mtx);
                        dbuf_destroy(dn->dn_bonus);
                        dn->dn_bonus = NULL;
                } else {
                        dn->dn_bonus->db_pending_evict = TRUE;
                }
        }
        rw_exit(&dn->dn_struct_rwlock);
}

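/*
 * Discard the dirty records on the given list without writing them out,
 * recursing through the children of indirect dbufs, and release the
 * hold that each dirty record had on its dbuf.
 */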
static void
dnode_undirty_dbufs(list_t *list)
{
        dbuf_dirty_record_t *dr;

        while ((dr = list_head(list)) != NULL) {
                dmu_buf_impl_t *db = dr->dr_dbuf;
                uint64_t txg = dr->dr_txg;

                if (db->db_level != 0)
                        dnode_undirty_dbufs(&dr->dt.di.dr_children);

                mutex_enter(&db->db_mtx);
                /* XXX - use dbuf_undirty()? */
                list_remove(list, dr);
                ASSERT(db->db_last_dirty == dr);
                db->db_last_dirty = NULL;
                db->db_dirtycnt -= 1;
                if (db->db_level == 0) {
                        ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
                            dr->dt.dl.dr_data == db->db_buf);
                        dbuf_unoverride(dr);
                } else {
                        mutex_destroy(&dr->dt.di.dr_mtx);
                        list_destroy(&dr->dt.di.dr_children);
                }
                kmem_free(dr, sizeof (dbuf_dirty_record_t));
                dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
        }
}

static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
        int txgoff = tx->tx_txg & TXG_MASK;

        ASSERT(dmu_tx_is_syncing(tx));

        /*
         * Our contents should have been freed in dnode_sync() by the
         * free range record inserted by the caller of dnode_free().
         */
        ASSERT0(DN_USED_BYTES(dn->dn_phys));
        ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

        dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
        dnode_evict_dbufs(dn);

        /*
         * XXX - It would be nice to assert this, but we may still
         * have residual holds from async evictions from the arc...
         *
         * zfs_obj_to_path() also depends on this being
         * commented out.
         *
         * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
         */

        /* Undirty next bits */
        dn->dn_next_nlevels[txgoff] = 0;
        dn->dn_next_indblkshift[txgoff] = 0;
        dn->dn_next_blksz[txgoff] = 0;

        /* ASSERT(blkptrs are zero); */
        ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
        ASSERT(dn->dn_type != DMU_OT_NONE);

        ASSERT(dn->dn_free_txg > 0);
        if (dn->dn_allocated_txg != dn->dn_free_txg)
                dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
        bzero(dn->dn_phys, sizeof (dnode_phys_t));

        mutex_enter(&dn->dn_mtx);
        dn->dn_type = DMU_OT_NONE;
        dn->dn_maxblkid = 0;
        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_have_spill = B_FALSE;
        mutex_exit(&dn->dn_mtx);

        ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

        dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
        /*
         * Now that we've released our hold, the dnode may
         * be evicted, so we mustn't access it.
         */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
        dnode_phys_t *dnp = dn->dn_phys;
        int txgoff = tx->tx_txg & TXG_MASK;
        list_t *list = &dn->dn_dirty_records[txgoff];
        static const dnode_phys_t zerodn = { 0 };
        boolean_t kill_spill = B_FALSE;

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
        ASSERT(dnp->dn_type != DMU_OT_NONE ||
            bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
        DNODE_VERIFY(dn);

        ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

        if (dmu_objset_userused_enabled(dn->dn_objset) &&
            !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                mutex_enter(&dn->dn_mtx);
                dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
                dn->dn_oldflags = dn->dn_phys->dn_flags;
                dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
                mutex_exit(&dn->dn_mtx);
                dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
        } else {
                /* Once we account for it, we should always account for it. */
                ASSERT(!(dn->dn_phys->dn_flags &
                    DNODE_FLAG_USERUSED_ACCOUNTED));
        }

        mutex_enter(&dn->dn_mtx);
        if (dn->dn_allocated_txg == tx->tx_txg) {
                /* The dnode is newly allocated or reallocated */
                if (dnp->dn_type == DMU_OT_NONE) {
                        /* this is a first alloc, not a realloc */
                        dnp->dn_nlevels = 1;
                        dnp->dn_nblkptr = dn->dn_nblkptr;
                }

                dnp->dn_type = dn->dn_type;
                dnp->dn_bonustype = dn->dn_bonustype;
                dnp->dn_bonuslen = dn->dn_bonuslen;
        }
        ASSERT(dnp->dn_nlevels > 1 ||
            BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
            BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
            dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        ASSERT(dnp->dn_nlevels < 2 ||
            BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

        if (dn->dn_next_type[txgoff] != 0) {
                dnp->dn_type = dn->dn_type;
                dn->dn_next_type[txgoff] = 0;
        }

        if (dn->dn_next_blksz[txgoff] != 0) {
                ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
                    SPA_MINBLOCKSIZE) == 0);
                ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
                    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
                    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
                    dnp->dn_datablkszsec ||
                    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
                dnp->dn_datablkszsec =
                    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
                dn->dn_next_blksz[txgoff] = 0;
        }

        if (dn->dn_next_bonuslen[txgoff] != 0) {
                if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
                        dnp->dn_bonuslen = 0;
                else
                        dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
                ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
                dn->dn_next_bonuslen[txgoff] = 0;
        }

        if (dn->dn_next_bonustype[txgoff] != 0) {
                ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
                dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
                dn->dn_next_bonustype[txgoff] = 0;
        }

        boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
            dn->dn_free_txg <= tx->tx_txg;

        /*
         * Remove the spill block if we have been explicitly asked to
         * remove it, or if the object is being removed.
         */
        if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
                if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
                        kill_spill = B_TRUE;
                dn->dn_rm_spillblk[txgoff] = 0;
        }

        if (dn->dn_next_indblkshift[txgoff] != 0) {
                ASSERT(dnp->dn_nlevels == 1);
                dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
                dn->dn_next_indblkshift[txgoff] = 0;
        }

        /*
         * Just take the live (open-context) values for checksum and compress.
         * Strictly speaking it's a future leak, but nothing bad happens if we
         * start using the new checksum or compress algorithm a little early.
         */
        dnp->dn_checksum = dn->dn_checksum;
        dnp->dn_compress = dn->dn_compress;

        mutex_exit(&dn->dn_mtx);

        if (kill_spill) {
                free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
                mutex_enter(&dn->dn_mtx);
                dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
                mutex_exit(&dn->dn_mtx);
        }

        /* process all the "freed" ranges in the file */
        if (dn->dn_free_ranges[txgoff] != NULL) {
                dnode_sync_free_range_arg_t dsfra;
                dsfra.dsfra_dnode = dn;
                dsfra.dsfra_tx = tx;
                mutex_enter(&dn->dn_mtx);
                range_tree_vacate(dn->dn_free_ranges[txgoff],
                    dnode_sync_free_range, &dsfra);
                range_tree_destroy(dn->dn_free_ranges[txgoff]);
                dn->dn_free_ranges[txgoff] = NULL;
                mutex_exit(&dn->dn_mtx);
        }

        if (freeing_dnode) {
                dn->dn_objset->os_freed_dnodes++;
                dnode_sync_free(dn, tx);
                return;
        }

        if (dn->dn_next_nlevels[txgoff]) {
                dnode_increase_indirection(dn, tx);
                dn->dn_next_nlevels[txgoff] = 0;
        }

        if (dn->dn_next_nblkptr[txgoff]) {
                /* this should only happen on a realloc */
                ASSERT(dn->dn_allocated_txg == tx->tx_txg);
                if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
                        /* zero the new blkptrs we are gaining */
                        bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
                            sizeof (blkptr_t) *
                            (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
                } else {
                        int i;
                        ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
                        /* the blkptrs we are losing better be unallocated */
                        for (i = dn->dn_next_nblkptr[txgoff];
                            i < dnp->dn_nblkptr; i++)
                                ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
                }
                mutex_enter(&dn->dn_mtx);
                dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
                dn->dn_next_nblkptr[txgoff] = 0;
                mutex_exit(&dn->dn_mtx);
        }

        dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

        if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                ASSERT3P(list_head(list), ==, NULL);
                dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
        }

        /*
         * Although we have dropped our reference to the dnode, it
         * can't be evicted until it's written, and we haven't yet
         * initiated the I/O for the dnode's dbuf.
         */
}