1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2012, 2018 by Delphix. All rights reserved. 25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 26 * Copyright 2020 Oxide Computer Company 27 */ 28 29 #include <sys/zfs_context.h> 30 #include <sys/dbuf.h> 31 #include <sys/dnode.h> 32 #include <sys/dmu.h> 33 #include <sys/dmu_tx.h> 34 #include <sys/dmu_objset.h> 35 #include <sys/dmu_recv.h> 36 #include <sys/dsl_dataset.h> 37 #include <sys/spa.h> 38 #include <sys/range_tree.h> 39 #include <sys/zfeature.h> 40 41 static void 42 dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx) 43 { 44 dmu_buf_impl_t *db; 45 int txgoff = tx->tx_txg & TXG_MASK; 46 int nblkptr = dn->dn_phys->dn_nblkptr; 47 int old_toplvl = dn->dn_phys->dn_nlevels - 1; 48 int new_level = dn->dn_next_nlevels[txgoff]; 49 int i; 50 51 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 52 53 /* this dnode can't be paged out because it's dirty */ 54 ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE); 55 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 56 ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0); 57 58 db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG); 59 ASSERT(db != NULL); 60 61 dn->dn_phys->dn_nlevels = new_level; 62 dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset, 63 dn->dn_object, dn->dn_phys->dn_nlevels); 64 65 /* transfer dnode's block pointers to new indirect block */ 66 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT); 67 ASSERT(db->db.db_data); 68 ASSERT(arc_released(db->db_buf)); 69 ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size); 70 bcopy(dn->dn_phys->dn_blkptr, db->db.db_data, 71 sizeof (blkptr_t) * nblkptr); 72 arc_buf_freeze(db->db_buf); 73 74 /* set dbuf's parent pointers to new indirect buf */ 75 for (i = 0; i < nblkptr; i++) { 76 dmu_buf_impl_t *child = 77 dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i); 78 79 if (child == NULL) 80 continue; 81 #ifdef DEBUG 82 DB_DNODE_ENTER(child); 83 ASSERT3P(DB_DNODE(child), ==, dn); 84 DB_DNODE_EXIT(child); 85 #endif /* DEBUG */ 86 if (child->db_parent && child->db_parent != dn->dn_dbuf) { 87 ASSERT(child->db_parent->db_level == db->db_level); 88 ASSERT(child->db_blkptr != 89 &dn->dn_phys->dn_blkptr[child->db_blkid]); 90 mutex_exit(&child->db_mtx); 91 continue; 92 } 93 ASSERT(child->db_parent == NULL || 94 child->db_parent == dn->dn_dbuf); 95 96 child->db_parent = db; 97 dbuf_add_ref(db, child); 98 if (db->db.db_data) 99 child->db_blkptr = (blkptr_t *)db->db.db_data + i; 100 else 101 child->db_blkptr = NULL; 102 dprintf_dbuf_bp(child, child->db_blkptr, 103 "changed db_blkptr to new indirect %s", ""); 104 105 mutex_exit(&child->db_mtx); 106 } 107 108 bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr); 109 110 dbuf_rele(db, FTAG); 111 112 rw_exit(&dn->dn_struct_rwlock); 113 } 114 115 static void 116 free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx) 117 { 118 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 119 uint64_t bytesfreed = 0; 120 121 dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num); 122 123 for (int i = 0; i < num; i++, bp++) { 124 if (BP_IS_HOLE(bp)) 125 continue; 126 127 bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE); 128 ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys)); 129 130 /* 131 * Save some useful information on the holes being 132 * punched, including logical size, type, and indirection 133 * level. Retaining birth time enables detection of when 134 * holes are punched for reducing the number of free 135 * records transmitted during a zfs send. 136 */ 137 138 uint64_t lsize = BP_GET_LSIZE(bp); 139 dmu_object_type_t type = BP_GET_TYPE(bp); 140 uint64_t lvl = BP_GET_LEVEL(bp); 141 142 bzero(bp, sizeof (blkptr_t)); 143 144 if (spa_feature_is_active(dn->dn_objset->os_spa, 145 SPA_FEATURE_HOLE_BIRTH)) { 146 BP_SET_LSIZE(bp, lsize); 147 BP_SET_TYPE(bp, type); 148 BP_SET_LEVEL(bp, lvl); 149 BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0); 150 } 151 } 152 dnode_diduse_space(dn, -bytesfreed); 153 } 154 155 #ifdef ZFS_DEBUG 156 static void 157 free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx) 158 { 159 int off, num; 160 int i, err, epbs; 161 uint64_t txg = tx->tx_txg; 162 dnode_t *dn; 163 164 DB_DNODE_ENTER(db); 165 dn = DB_DNODE(db); 166 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 167 off = start - (db->db_blkid * 1<<epbs); 168 num = end - start + 1; 169 170 ASSERT3U(off, >=, 0); 171 ASSERT3U(num, >=, 0); 172 ASSERT3U(db->db_level, >, 0); 173 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 174 ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT); 175 ASSERT(db->db_blkptr != NULL); 176 177 for (i = off; i < off+num; i++) { 178 uint64_t *buf; 179 dmu_buf_impl_t *child; 180 dbuf_dirty_record_t *dr; 181 int j; 182 183 ASSERT(db->db_level == 1); 184 185 rw_enter(&dn->dn_struct_rwlock, RW_READER); 186 err = dbuf_hold_impl(dn, db->db_level-1, 187 (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child); 188 rw_exit(&dn->dn_struct_rwlock); 189 if (err == ENOENT) 190 continue; 191 ASSERT(err == 0); 192 ASSERT(child->db_level == 0); 193 dr = child->db_last_dirty; 194 while (dr && dr->dr_txg > txg) 195 dr = dr->dr_next; 196 ASSERT(dr == NULL || dr->dr_txg == txg); 197 198 /* data_old better be zeroed */ 199 if (dr) { 200 buf = dr->dt.dl.dr_data->b_data; 201 for (j = 0; j < child->db.db_size >> 3; j++) { 202 if (buf[j] != 0) { 203 panic("freed data not zero: " 204 "child=%p i=%d off=%d num=%d\n", 205 (void *)child, i, off, num); 206 } 207 } 208 } 209 210 /* 211 * db_data better be zeroed unless it's dirty in a 212 * future txg. 213 */ 214 mutex_enter(&child->db_mtx); 215 buf = child->db.db_data; 216 if (buf != NULL && child->db_state != DB_FILL && 217 child->db_last_dirty == NULL) { 218 for (j = 0; j < child->db.db_size >> 3; j++) { 219 if (buf[j] != 0) { 220 panic("freed data not zero: " 221 "child=%p i=%d off=%d num=%d\n", 222 (void *)child, i, off, num); 223 } 224 } 225 } 226 mutex_exit(&child->db_mtx); 227 228 dbuf_rele(child, FTAG); 229 } 230 DB_DNODE_EXIT(db); 231 } 232 #endif 233 234 /* 235 * We don't usually free the indirect blocks here. If in one txg we have a 236 * free_range and a write to the same indirect block, it's important that we 237 * preserve the hole's birth times. Therefore, we don't free any any indirect 238 * blocks in free_children(). If an indirect block happens to turn into all 239 * holes, it will be freed by dbuf_write_children_ready, which happens at a 240 * point in the syncing process where we know for certain the contents of the 241 * indirect block. 242 * 243 * However, if we're freeing a dnode, its space accounting must go to zero 244 * before we actually try to free the dnode, or we will trip an assertion. In 245 * addition, we know the case described above cannot occur, because the dnode is 246 * being freed. Therefore, we free the indirect blocks immediately in that 247 * case. 248 */ 249 static void 250 free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, 251 boolean_t free_indirects, dmu_tx_t *tx) 252 { 253 dnode_t *dn; 254 blkptr_t *bp; 255 dmu_buf_impl_t *subdb; 256 uint64_t start, end, dbstart, dbend; 257 unsigned int epbs, shift, i; 258 259 /* 260 * There is a small possibility that this block will not be cached: 261 * 1 - if level > 1 and there are no children with level <= 1 262 * 2 - if this block was evicted since we read it from 263 * dmu_tx_hold_free(). 264 */ 265 if (db->db_state != DB_CACHED) 266 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 267 268 /* 269 * If we modify this indirect block, and we are not freeing the 270 * dnode (!free_indirects), then this indirect block needs to get 271 * written to disk by dbuf_write(). If it is dirty, we know it will 272 * be written (otherwise, we would have incorrect on-disk state 273 * because the space would be freed but still referenced by the BP 274 * in this indirect block). Therefore we VERIFY that it is 275 * dirty. 276 * 277 * Our VERIFY covers some cases that do not actually have to be 278 * dirty, but the open-context code happens to dirty. E.g. if the 279 * blocks we are freeing are all holes, because in that case, we 280 * are only freeing part of this indirect block, so it is an 281 * ancestor of the first or last block to be freed. The first and 282 * last L1 indirect blocks are always dirtied by dnode_free_range(). 283 */ 284 VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0); 285 286 dbuf_release_bp(db); 287 bp = db->db.db_data; 288 289 DB_DNODE_ENTER(db); 290 dn = DB_DNODE(db); 291 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 292 ASSERT3U(epbs, <, 31); 293 shift = (db->db_level - 1) * epbs; 294 dbstart = db->db_blkid << epbs; 295 start = blkid >> shift; 296 if (dbstart < start) { 297 bp += start - dbstart; 298 } else { 299 start = dbstart; 300 } 301 dbend = ((db->db_blkid + 1) << epbs) - 1; 302 end = (blkid + nblks - 1) >> shift; 303 if (dbend <= end) 304 end = dbend; 305 306 ASSERT3U(start, <=, end); 307 308 if (db->db_level == 1) { 309 FREE_VERIFY(db, start, end, tx); 310 free_blocks(dn, bp, end-start+1, tx); 311 } else { 312 for (uint64_t id = start; id <= end; id++, bp++) { 313 if (BP_IS_HOLE(bp)) 314 continue; 315 rw_enter(&dn->dn_struct_rwlock, RW_READER); 316 VERIFY0(dbuf_hold_impl(dn, db->db_level - 1, 317 id, TRUE, FALSE, FTAG, &subdb)); 318 rw_exit(&dn->dn_struct_rwlock); 319 ASSERT3P(bp, ==, subdb->db_blkptr); 320 321 free_children(subdb, blkid, nblks, free_indirects, tx); 322 dbuf_rele(subdb, FTAG); 323 } 324 } 325 326 if (free_indirects) { 327 for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) 328 ASSERT(BP_IS_HOLE(bp)); 329 bzero(db->db.db_data, db->db.db_size); 330 free_blocks(dn, db->db_blkptr, 1, tx); 331 } 332 333 DB_DNODE_EXIT(db); 334 arc_buf_freeze(db->db_buf); 335 } 336 337 /* 338 * Traverse the indicated range of the provided file 339 * and "free" all the blocks contained there. 340 */ 341 static void 342 dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks, 343 boolean_t free_indirects, dmu_tx_t *tx) 344 { 345 blkptr_t *bp = dn->dn_phys->dn_blkptr; 346 int dnlevel = dn->dn_phys->dn_nlevels; 347 boolean_t trunc = B_FALSE; 348 349 if (blkid > dn->dn_phys->dn_maxblkid) 350 return; 351 352 ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX); 353 if (blkid + nblks > dn->dn_phys->dn_maxblkid) { 354 nblks = dn->dn_phys->dn_maxblkid - blkid + 1; 355 trunc = B_TRUE; 356 } 357 358 /* There are no indirect blocks in the object */ 359 if (dnlevel == 1) { 360 if (blkid >= dn->dn_phys->dn_nblkptr) { 361 /* this range was never made persistent */ 362 return; 363 } 364 ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr); 365 free_blocks(dn, bp + blkid, nblks, tx); 366 } else { 367 int shift = (dnlevel - 1) * 368 (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT); 369 int start = blkid >> shift; 370 int end = (blkid + nblks - 1) >> shift; 371 dmu_buf_impl_t *db; 372 373 ASSERT(start < dn->dn_phys->dn_nblkptr); 374 bp += start; 375 for (int i = start; i <= end; i++, bp++) { 376 if (BP_IS_HOLE(bp)) 377 continue; 378 rw_enter(&dn->dn_struct_rwlock, RW_READER); 379 VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i, 380 TRUE, FALSE, FTAG, &db)); 381 rw_exit(&dn->dn_struct_rwlock); 382 383 free_children(db, blkid, nblks, free_indirects, tx); 384 dbuf_rele(db, FTAG); 385 } 386 } 387 388 /* 389 * Do not truncate the maxblkid if we are performing a raw 390 * receive. The raw receive sets the maxblkid manually and 391 * must not be overridden. Usually, the last DRR_FREE record 392 * will be at the maxblkid, because the source system sets 393 * the maxblkid when truncating. However, if the last block 394 * was freed by overwriting with zeros and being compressed 395 * away to a hole, the source system will generate a DRR_FREE 396 * record while leaving the maxblkid after the end of that 397 * record. In this case we need to leave the maxblkid as 398 * indicated in the DRR_OBJECT record, so that it matches the 399 * source system, ensuring that the cryptographic hashes will 400 * match. 401 */ 402 if (trunc && !dn->dn_objset->os_raw_receive) { 403 dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1; 404 405 uint64_t off = (dn->dn_phys->dn_maxblkid + 1) * 406 (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT); 407 ASSERT(off < dn->dn_phys->dn_maxblkid || 408 dn->dn_phys->dn_maxblkid == 0 || 409 dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0); 410 } 411 } 412 413 typedef struct dnode_sync_free_range_arg { 414 dnode_t *dsfra_dnode; 415 dmu_tx_t *dsfra_tx; 416 boolean_t dsfra_free_indirects; 417 } dnode_sync_free_range_arg_t; 418 419 static void 420 dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks) 421 { 422 dnode_sync_free_range_arg_t *dsfra = arg; 423 dnode_t *dn = dsfra->dsfra_dnode; 424 425 mutex_exit(&dn->dn_mtx); 426 dnode_sync_free_range_impl(dn, blkid, nblks, 427 dsfra->dsfra_free_indirects, dsfra->dsfra_tx); 428 mutex_enter(&dn->dn_mtx); 429 } 430 431 /* 432 * Try to kick all the dnode's dbufs out of the cache... 433 */ 434 void 435 dnode_evict_dbufs(dnode_t *dn) 436 { 437 dmu_buf_impl_t db_marker; 438 dmu_buf_impl_t *db, *db_next; 439 440 mutex_enter(&dn->dn_dbufs_mtx); 441 for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) { 442 443 #ifdef DEBUG 444 DB_DNODE_ENTER(db); 445 ASSERT3P(DB_DNODE(db), ==, dn); 446 DB_DNODE_EXIT(db); 447 #endif /* DEBUG */ 448 449 mutex_enter(&db->db_mtx); 450 if (db->db_state != DB_EVICTING && 451 zfs_refcount_is_zero(&db->db_holds)) { 452 db_marker.db_level = db->db_level; 453 db_marker.db_blkid = db->db_blkid; 454 db_marker.db_state = DB_SEARCH; 455 avl_insert_here(&dn->dn_dbufs, &db_marker, db, 456 AVL_BEFORE); 457 458 /* 459 * We need to use the "marker" dbuf rather than 460 * simply getting the next dbuf, because 461 * dbuf_destroy() may actually remove multiple dbufs. 462 * It can call itself recursively on the parent dbuf, 463 * which may also be removed from dn_dbufs. The code 464 * flow would look like: 465 * 466 * dbuf_destroy(): 467 * dnode_rele_and_unlock(parent_dbuf, evicting=TRUE): 468 * if (!cacheable || pending_evict) 469 * dbuf_destroy() 470 */ 471 dbuf_destroy(db); 472 473 db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker); 474 avl_remove(&dn->dn_dbufs, &db_marker); 475 } else { 476 db->db_pending_evict = TRUE; 477 mutex_exit(&db->db_mtx); 478 db_next = AVL_NEXT(&dn->dn_dbufs, db); 479 } 480 } 481 mutex_exit(&dn->dn_dbufs_mtx); 482 483 dnode_evict_bonus(dn); 484 } 485 486 void 487 dnode_evict_bonus(dnode_t *dn) 488 { 489 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 490 if (dn->dn_bonus != NULL) { 491 if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) { 492 mutex_enter(&dn->dn_bonus->db_mtx); 493 dbuf_destroy(dn->dn_bonus); 494 dn->dn_bonus = NULL; 495 } else { 496 dn->dn_bonus->db_pending_evict = TRUE; 497 } 498 } 499 rw_exit(&dn->dn_struct_rwlock); 500 } 501 502 static void 503 dnode_undirty_dbufs(list_t *list) 504 { 505 dbuf_dirty_record_t *dr; 506 507 while (dr = list_head(list)) { 508 dmu_buf_impl_t *db = dr->dr_dbuf; 509 uint64_t txg = dr->dr_txg; 510 511 if (db->db_level != 0) 512 dnode_undirty_dbufs(&dr->dt.di.dr_children); 513 514 mutex_enter(&db->db_mtx); 515 /* XXX - use dbuf_undirty()? */ 516 list_remove(list, dr); 517 ASSERT(db->db_last_dirty == dr); 518 db->db_last_dirty = NULL; 519 db->db_dirtycnt -= 1; 520 if (db->db_level == 0) { 521 ASSERT(db->db_blkid == DMU_BONUS_BLKID || 522 dr->dt.dl.dr_data == db->db_buf); 523 dbuf_unoverride(dr); 524 } else { 525 mutex_destroy(&dr->dt.di.dr_mtx); 526 list_destroy(&dr->dt.di.dr_children); 527 } 528 kmem_free(dr, sizeof (dbuf_dirty_record_t)); 529 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE); 530 } 531 } 532 533 static void 534 dnode_sync_free(dnode_t *dn, dmu_tx_t *tx) 535 { 536 int txgoff = tx->tx_txg & TXG_MASK; 537 538 ASSERT(dmu_tx_is_syncing(tx)); 539 540 /* 541 * Our contents should have been freed in dnode_sync() by the 542 * free range record inserted by the caller of dnode_free(). 543 */ 544 ASSERT0(DN_USED_BYTES(dn->dn_phys)); 545 ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr)); 546 547 dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]); 548 dnode_evict_dbufs(dn); 549 550 /* 551 * XXX - It would be nice to assert this, but we may still 552 * have residual holds from async evictions from the arc... 553 * 554 * zfs_obj_to_path() also depends on this being 555 * commented out. 556 * 557 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1); 558 */ 559 560 /* Undirty next bits */ 561 dn->dn_next_nlevels[txgoff] = 0; 562 dn->dn_next_indblkshift[txgoff] = 0; 563 dn->dn_next_blksz[txgoff] = 0; 564 dn->dn_next_maxblkid[txgoff] = 0; 565 566 /* ASSERT(blkptrs are zero); */ 567 ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE); 568 ASSERT(dn->dn_type != DMU_OT_NONE); 569 570 ASSERT(dn->dn_free_txg > 0); 571 if (dn->dn_allocated_txg != dn->dn_free_txg) 572 dmu_buf_will_dirty(&dn->dn_dbuf->db, tx); 573 bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots); 574 dnode_free_interior_slots(dn); 575 576 mutex_enter(&dn->dn_mtx); 577 dn->dn_type = DMU_OT_NONE; 578 dn->dn_maxblkid = 0; 579 dn->dn_allocated_txg = 0; 580 dn->dn_free_txg = 0; 581 dn->dn_have_spill = B_FALSE; 582 dn->dn_num_slots = 1; 583 mutex_exit(&dn->dn_mtx); 584 585 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 586 587 dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg); 588 /* 589 * Now that we've released our hold, the dnode may 590 * be evicted, so we mustn't access it. 591 */ 592 } 593 594 /* 595 * Write out the dnode's dirty buffers. 596 */ 597 void 598 dnode_sync(dnode_t *dn, dmu_tx_t *tx) 599 { 600 objset_t *os = dn->dn_objset; 601 dnode_phys_t *dnp = dn->dn_phys; 602 int txgoff = tx->tx_txg & TXG_MASK; 603 list_t *list = &dn->dn_dirty_records[txgoff]; 604 static const dnode_phys_t zerodn = { 0 }; 605 boolean_t kill_spill = B_FALSE; 606 607 ASSERT(dmu_tx_is_syncing(tx)); 608 ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg); 609 ASSERT(dnp->dn_type != DMU_OT_NONE || 610 bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0); 611 DNODE_VERIFY(dn); 612 613 ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf)); 614 615 /* 616 * Do user accounting if it is enabled and this is not 617 * an encrypted receive. 618 */ 619 if (dmu_objset_userused_enabled(os) && 620 !DMU_OBJECT_IS_SPECIAL(dn->dn_object) && 621 (!os->os_encrypted || !dmu_objset_is_receiving(os))) { 622 mutex_enter(&dn->dn_mtx); 623 dn->dn_oldused = DN_USED_BYTES(dn->dn_phys); 624 dn->dn_oldflags = dn->dn_phys->dn_flags; 625 dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED; 626 if (dmu_objset_userobjused_enabled(dn->dn_objset)) 627 dn->dn_phys->dn_flags |= 628 DNODE_FLAG_USEROBJUSED_ACCOUNTED; 629 mutex_exit(&dn->dn_mtx); 630 dmu_objset_userquota_get_ids(dn, B_FALSE, tx); 631 } else { 632 /* Once we account for it, we should always account for it */ 633 ASSERT(!(dn->dn_phys->dn_flags & 634 DNODE_FLAG_USERUSED_ACCOUNTED)); 635 ASSERT(!(dn->dn_phys->dn_flags & 636 DNODE_FLAG_USEROBJUSED_ACCOUNTED)); 637 } 638 639 mutex_enter(&dn->dn_mtx); 640 if (dn->dn_allocated_txg == tx->tx_txg) { 641 /* The dnode is newly allocated or reallocated */ 642 if (dnp->dn_type == DMU_OT_NONE) { 643 /* this is a first alloc, not a realloc */ 644 dnp->dn_nlevels = 1; 645 dnp->dn_nblkptr = dn->dn_nblkptr; 646 } 647 648 dnp->dn_type = dn->dn_type; 649 dnp->dn_bonustype = dn->dn_bonustype; 650 dnp->dn_bonuslen = dn->dn_bonuslen; 651 } 652 653 dnp->dn_extra_slots = dn->dn_num_slots - 1; 654 655 ASSERT(dnp->dn_nlevels > 1 || 656 BP_IS_HOLE(&dnp->dn_blkptr[0]) || 657 BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) || 658 BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 659 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); 660 ASSERT(dnp->dn_nlevels < 2 || 661 BP_IS_HOLE(&dnp->dn_blkptr[0]) || 662 BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift); 663 664 if (dn->dn_next_type[txgoff] != 0) { 665 dnp->dn_type = dn->dn_type; 666 dn->dn_next_type[txgoff] = 0; 667 } 668 669 if (dn->dn_next_blksz[txgoff] != 0) { 670 ASSERT(P2PHASE(dn->dn_next_blksz[txgoff], 671 SPA_MINBLOCKSIZE) == 0); 672 ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) || 673 dn->dn_maxblkid == 0 || list_head(list) != NULL || 674 dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT == 675 dnp->dn_datablkszsec || 676 !range_tree_is_empty(dn->dn_free_ranges[txgoff])); 677 dnp->dn_datablkszsec = 678 dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT; 679 dn->dn_next_blksz[txgoff] = 0; 680 } 681 682 if (dn->dn_next_bonuslen[txgoff] != 0) { 683 if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN) 684 dnp->dn_bonuslen = 0; 685 else 686 dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff]; 687 ASSERT(dnp->dn_bonuslen <= 688 DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1)); 689 dn->dn_next_bonuslen[txgoff] = 0; 690 } 691 692 if (dn->dn_next_bonustype[txgoff] != 0) { 693 ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff])); 694 dnp->dn_bonustype = dn->dn_next_bonustype[txgoff]; 695 dn->dn_next_bonustype[txgoff] = 0; 696 } 697 698 boolean_t freeing_dnode = dn->dn_free_txg > 0 && 699 dn->dn_free_txg <= tx->tx_txg; 700 701 /* 702 * Remove the spill block if we have been explicitly asked to 703 * remove it, or if the object is being removed. 704 */ 705 if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) { 706 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) 707 kill_spill = B_TRUE; 708 dn->dn_rm_spillblk[txgoff] = 0; 709 } 710 711 if (dn->dn_next_indblkshift[txgoff] != 0) { 712 ASSERT(dnp->dn_nlevels == 1); 713 dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff]; 714 dn->dn_next_indblkshift[txgoff] = 0; 715 } 716 717 /* 718 * Just take the live (open-context) values for checksum and compress. 719 * Strictly speaking it's a future leak, but nothing bad happens if we 720 * start using the new checksum or compress algorithm a little early. 721 */ 722 dnp->dn_checksum = dn->dn_checksum; 723 dnp->dn_compress = dn->dn_compress; 724 725 mutex_exit(&dn->dn_mtx); 726 727 if (kill_spill) { 728 free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx); 729 mutex_enter(&dn->dn_mtx); 730 dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR; 731 mutex_exit(&dn->dn_mtx); 732 } 733 734 /* process all the "freed" ranges in the file */ 735 if (dn->dn_free_ranges[txgoff] != NULL) { 736 dnode_sync_free_range_arg_t dsfra; 737 dsfra.dsfra_dnode = dn; 738 dsfra.dsfra_tx = tx; 739 dsfra.dsfra_free_indirects = freeing_dnode; 740 mutex_enter(&dn->dn_mtx); 741 if (freeing_dnode) { 742 ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff], 743 0, dn->dn_maxblkid + 1)); 744 } 745 /* 746 * Because dnode_sync_free_range() must drop dn_mtx during its 747 * processing, using it as a callback to range_tree_vacate() is 748 * not safe. No other operations (besides destroy) are allowed 749 * once range_tree_vacate() has begun, and dropping dn_mtx 750 * would leave a window open for another thread to observe that 751 * invalid (and unsafe) state. 752 */ 753 range_tree_walk(dn->dn_free_ranges[txgoff], 754 dnode_sync_free_range, &dsfra); 755 range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL); 756 range_tree_destroy(dn->dn_free_ranges[txgoff]); 757 dn->dn_free_ranges[txgoff] = NULL; 758 mutex_exit(&dn->dn_mtx); 759 } 760 761 if (freeing_dnode) { 762 dn->dn_objset->os_freed_dnodes++; 763 dnode_sync_free(dn, tx); 764 return; 765 } 766 767 if (dn->dn_num_slots > DNODE_MIN_SLOTS) { 768 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 769 mutex_enter(&ds->ds_lock); 770 ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] = 771 B_TRUE; 772 mutex_exit(&ds->ds_lock); 773 } 774 775 if (dn->dn_next_nlevels[txgoff]) { 776 dnode_increase_indirection(dn, tx); 777 dn->dn_next_nlevels[txgoff] = 0; 778 } 779 780 /* 781 * This must be done after dnode_sync_free_range() 782 * and dnode_increase_indirection(). See dnode_new_blkid() 783 * for an explanation of the high bit being set. 784 */ 785 if (dn->dn_next_maxblkid[txgoff]) { 786 mutex_enter(&dn->dn_mtx); 787 dnp->dn_maxblkid = 788 dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET; 789 dn->dn_next_maxblkid[txgoff] = 0; 790 mutex_exit(&dn->dn_mtx); 791 } 792 793 if (dn->dn_next_nblkptr[txgoff]) { 794 /* this should only happen on a realloc */ 795 ASSERT(dn->dn_allocated_txg == tx->tx_txg); 796 if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) { 797 /* zero the new blkptrs we are gaining */ 798 bzero(dnp->dn_blkptr + dnp->dn_nblkptr, 799 sizeof (blkptr_t) * 800 (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr)); 801 #ifdef ZFS_DEBUG 802 } else { 803 int i; 804 ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr); 805 /* the blkptrs we are losing better be unallocated */ 806 for (i = dn->dn_next_nblkptr[txgoff]; 807 i < dnp->dn_nblkptr; i++) 808 ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i])); 809 #endif 810 } 811 mutex_enter(&dn->dn_mtx); 812 dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff]; 813 dn->dn_next_nblkptr[txgoff] = 0; 814 mutex_exit(&dn->dn_mtx); 815 } 816 817 dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx); 818 819 if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) { 820 ASSERT3P(list_head(list), ==, NULL); 821 dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg); 822 } 823 824 /* 825 * Although we have dropped our reference to the dnode, it 826 * can't be evicted until its written, and we haven't yet 827 * initiated the IO for the dnode's dbuf. 828 */ 829 } 830