/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
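/*
 * Free "num" block pointers starting at "bp" and charge the freed space
 * back against the dnode's accounting.  When the hole_birth feature is
 * active, each freed BP is rewritten as a hole that keeps its logical
 * size, type, and level, with the current txg as its birth time, so
 * that zfs send can tell when the hole was punched.
 */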
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level.  Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
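/*
 * Debug-only verification that the level-0 blocks in the range
 * [start, end] under this level-1 indirect block have really been
 * zeroed, both in any dirty record for this txg and in the cached
 * dbuf contents.
 */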
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

/*
 * We don't usually free the indirect blocks here.  If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times.  Therefore, we don't free any indirect
 * blocks in free_children().  If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready(), which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed.  Therefore, we free the indirect blocks immediately in that
 * case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write().  If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block).  Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty.  E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed.  The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end - start + 1, tx);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
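/*
 * Argument block and callback for range_tree_vacate().  The caller
 * (dnode_sync) holds dn_mtx across the vacate, so the callback drops
 * it around the call to dnode_sync_free_range_impl() and reacquires
 * it before returning.
 */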
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	dnode_evict_bonus(dn);
}
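/*
 * Evict the bonus buffer if nothing holds it; otherwise mark it
 * pending-evict so it is destroyed when its last hold is released.
 */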
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
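/*
 * Walk the list of dirty records for this txg, undoing each one:
 * recurse through the children of dirty indirect buffers, undo any
 * override state on level-0 buffers, and drop the hold each dirty
 * record had on its dbuf.
 */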
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}
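/*
 * Called from dnode_sync() when the dnode is being freed in this txg:
 * discard its remaining dirty state, evict its dbufs, zero the on-disk
 * dnode, and drop the hold that was placed on the dnode when it was
 * dirtied.
 */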
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}
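	/*
	 * Fold the open-context dirty state for this txg (the dn_next_*
	 * fields) into the on-disk dnode under dn_mtx.
	 */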
	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}
	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the I/O for the dnode's dbuf.
	 */
}