/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
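/*
 * Raise the dnode's indirection by one level for this txg: hold a dbuf at
 * the new top level, copy any existing block pointers out of the dnode
 * into it, and reparent the old top-level dbufs under the new indirect
 * block.  The pending level change was recorded in dn_next_nlevels[] in
 * open context.
 */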
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
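/*
 * Free the "num" blocks pointed to by the array starting at "bp" and
 * charge the reclaimed space against the dnode's space accounting.  When
 * the hole_birth feature is active, each freed bp is left behind as a
 * hole that retains its logical size, type, level, and a birth time of
 * this txg, so that zfs send can tell when the hole was punched.
 */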
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level.  Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif
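/*
 * Free the portion of the range [blkid, blkid + nblks) of level-0 blocks
 * that falls under the indirect dbuf "db", recursing into lower-level
 * indirect blocks as needed.  If that leaves every pointer in "db" a
 * hole, free "db" itself as well.
 *
 * The shift arithmetic below maps level-0 block ids to pointer indexes
 * in this block.  For example, with the common 16K indirect block size,
 * epbs = dn_indblkshift - SPA_BLKPTRSHIFT = 14 - 7 = 7, so a level-1
 * indirect block maps 1 << 7 = 128 level-0 blocks.
 */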
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    i, B_TRUE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
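/*
 * The argument bundle and callback below are used by dnode_sync() to
 * vacate this txg's free range tree.  dnode_sync() holds dn_mtx across
 * range_tree_vacate(), so the callback drops and reacquires the mutex
 * around the actual free.
 */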
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, *db_next;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
#ifdef	DEBUG
			DB_DNODE_ENTER(db);
			ASSERT3P(DB_DNODE(db), ==, dn);
			DB_DNODE_EXIT(db);
#endif	/* DEBUG */

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}
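/*
 * Discard the dirty records on the given list: detach each record from
 * its dbuf and release the hold that dirtying took, recursing through
 * the children of indirect records.  Called when the dnode is being
 * freed, so none of these pending changes need to reach disk.
 */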
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}
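/*
 * Finish destroying a freed dnode in syncing context: undirty and evict
 * any remaining dbufs, zero the on-disk dnode, and reset the in-core
 * state so the slot can be reused.  Releases the txg-tagged hold on the
 * dnode, after which the dnode may be evicted and must not be touched.
 */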
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT(avl_is_empty(&dn->dn_dbufs));
	ASSERT3P(dn->dn_bonus, ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}
	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}