/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

/*
 * Add a level of indirection to the dnode: move its existing block
 * pointers into a newly allocated top-level indirect block, and repoint
 * any cached child dbufs at that block.
 */
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db;
        int txgoff = tx->tx_txg & TXG_MASK;
        int nblkptr = dn->dn_phys->dn_nblkptr;
        int old_toplvl = dn->dn_phys->dn_nlevels - 1;
        int new_level = dn->dn_next_nlevels[txgoff];
        int i;

        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

        /* this dnode can't be paged out because it's dirty */
        ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
        ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

        db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
        ASSERT(db != NULL);

        dn->dn_phys->dn_nlevels = new_level;
        dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
            dn->dn_object, dn->dn_phys->dn_nlevels);

        /* transfer dnode's block pointers to new indirect block */
        (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
        ASSERT(db->db.db_data);
        ASSERT(arc_released(db->db_buf));
        ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
        bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
            sizeof (blkptr_t) * nblkptr);
        arc_buf_freeze(db->db_buf);

        /*
         * Set the child dbufs' parent pointers to the new indirect buf.
         * Note that dbuf_find() returns the dbuf with its db_mtx held.
         */
        for (i = 0; i < nblkptr; i++) {
                dmu_buf_impl_t *child =
                    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

                if (child == NULL)
                        continue;
#ifdef DEBUG
                DB_DNODE_ENTER(child);
                ASSERT3P(DB_DNODE(child), ==, dn);
                DB_DNODE_EXIT(child);
#endif /* DEBUG */
                if (child->db_parent && child->db_parent != dn->dn_dbuf) {
                        ASSERT(child->db_parent->db_level == db->db_level);
                        ASSERT(child->db_blkptr !=
                            &dn->dn_phys->dn_blkptr[child->db_blkid]);
                        mutex_exit(&child->db_mtx);
                        continue;
                }
                ASSERT(child->db_parent == NULL ||
                    child->db_parent == dn->dn_dbuf);

                child->db_parent = db;
                dbuf_add_ref(db, child);
                if (db->db.db_data)
                        child->db_blkptr = (blkptr_t *)db->db.db_data + i;
                else
                        child->db_blkptr = NULL;
                dprintf_dbuf_bp(child, child->db_blkptr,
                    "changed db_blkptr to new indirect %s", "");

                mutex_exit(&child->db_mtx);
        }

        bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

        dbuf_rele(db, FTAG);

        rw_exit(&dn->dn_struct_rwlock);
}
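
/*
 * Illustrative arithmetic (example values only, not used by the code
 * above): with a typical dn_indblkshift of 17 (128K indirect blocks)
 * and SPA_BLKPTRSHIFT of 7 (128-byte block pointers), each indirect
 * block holds 1 << (17 - 7) = 1024 block pointers.  Raising dn_nlevels
 * by one therefore multiplies the addressable block range by 1024,
 * which is why a single new top-level indirect block can absorb all of
 * the dnode's dn_nblkptr (at most 3) old pointers.
 */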

/*
 * Free the on-disk blocks referenced by the given range of block
 * pointers, charging the freed space back against the dnode.
 */
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
        uint64_t bytesfreed = 0;

        dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

        for (int i = 0; i < num; i++, bp++) {
                if (BP_IS_HOLE(bp))
                        continue;

                bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
                ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

                /*
                 * Save some useful information on the holes being
                 * punched, including logical size, type, and indirection
                 * level.  Retaining the birth time enables detection of
                 * when holes were punched, which reduces the number of
                 * free records transmitted during a zfs send.
                 */

                uint64_t lsize = BP_GET_LSIZE(bp);
                dmu_object_type_t type = BP_GET_TYPE(bp);
                uint64_t lvl = BP_GET_LEVEL(bp);

                bzero(bp, sizeof (blkptr_t));

                if (spa_feature_is_active(dn->dn_objset->os_spa,
                    SPA_FEATURE_HOLE_BIRTH)) {
                        BP_SET_LSIZE(bp, lsize);
                        BP_SET_TYPE(bp, type);
                        BP_SET_LEVEL(bp, lvl);
                        BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
                }
        }
        dnode_diduse_space(dn, -bytesfreed);
}
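
/*
 * Illustrative sketch (example txg numbers only): with
 * SPA_FEATURE_HOLE_BIRTH active, a block punched out in txg 1000
 * leaves behind a hole BP whose birth txg is 1000 rather than an
 * all-zero BP.  An incremental "zfs send -i" from a snapshot taken
 * before txg 1000 must transmit a FREE record for that hole, but one
 * from a later snapshot can see that the hole already existed at the
 * incremental source and skip it.
 */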

#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
        int off, num;
        int i, err, epbs;
        uint64_t txg = tx->tx_txg;
        dnode_t *dn;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
        off = start - (db->db_blkid << epbs);
        num = end - start + 1;

        ASSERT3U(off, >=, 0);
        ASSERT3U(num, >=, 0);
        ASSERT3U(db->db_level, >, 0);
        ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
        ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
        ASSERT(db->db_blkptr != NULL);

        for (i = off; i < off+num; i++) {
                uint64_t *buf;
                dmu_buf_impl_t *child;
                dbuf_dirty_record_t *dr;
                int j;

                ASSERT(db->db_level == 1);

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                err = dbuf_hold_impl(dn, db->db_level-1,
                    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
                rw_exit(&dn->dn_struct_rwlock);
                if (err == ENOENT)
                        continue;
                ASSERT(err == 0);
                ASSERT(child->db_level == 0);
                dr = child->db_last_dirty;
                while (dr && dr->dr_txg > txg)
                        dr = dr->dr_next;
                ASSERT(dr == NULL || dr->dr_txg == txg);

                /* data_old better be zeroed */
                if (dr) {
                        buf = dr->dt.dl.dr_data->b_data;
                        for (j = 0; j < child->db.db_size >> 3; j++) {
                                if (buf[j] != 0) {
                                        panic("freed data not zero: "
                                            "child=%p i=%d off=%d num=%d\n",
                                            (void *)child, i, off, num);
                                }
                        }
                }

                /*
                 * db_data better be zeroed unless it's dirty in a
                 * future txg.
                 */
                mutex_enter(&child->db_mtx);
                buf = child->db.db_data;
                if (buf != NULL && child->db_state != DB_FILL &&
                    child->db_last_dirty == NULL) {
                        for (j = 0; j < child->db.db_size >> 3; j++) {
                                if (buf[j] != 0) {
                                        panic("freed data not zero: "
                                            "child=%p i=%d off=%d num=%d\n",
                                            (void *)child, i, off, num);
                                }
                        }
                }
                mutex_exit(&child->db_mtx);

                dbuf_rele(child, FTAG);
        }
        DB_DNODE_EXIT(db);
}
#endif

/*
 * We don't usually free the indirect blocks here.  If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times.  Therefore, we don't free any indirect
 * blocks in free_children().  If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready(), which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion.
 * In addition, we know the case described above cannot occur, because the
 * dnode is being freed.  Therefore, we free the indirect blocks immediately
 * in that case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
        dnode_t *dn;
        blkptr_t *bp;
        dmu_buf_impl_t *subdb;
        uint64_t start, end, dbstart, dbend;
        unsigned int epbs, shift, i;

        /*
         * There is a small possibility that this block will not be cached:
         *   1 - if level > 1 and there are no children with level <= 1
         *   2 - if this block was evicted since we read it from
         *       dmu_tx_hold_free().
         */
        if (db->db_state != DB_CACHED)
                (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

        /*
         * If we modify this indirect block, and we are not freeing the
         * dnode (!free_indirects), then this indirect block needs to get
         * written to disk by dbuf_write().  If it is dirty, we know it will
         * be written (otherwise, we would have incorrect on-disk state
         * because the space would be freed but still referenced by the BP
         * in this indirect block).  Therefore we VERIFY that it is
         * dirty.
         *
         * Our VERIFY covers some cases that do not actually have to be
         * dirty, but the open-context code happens to dirty.  E.g. if the
         * blocks we are freeing are all holes, because in that case, we
         * are only freeing part of this indirect block, so it is an
         * ancestor of the first or last block to be freed.  The first and
         * last L1 indirect blocks are always dirtied by dnode_free_range().
         */
        VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);

        dbuf_release_bp(db);
        bp = db->db.db_data;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
        ASSERT3U(epbs, <, 31);
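
        /*
         * Worked example of the range math below (example values only):
         * with epbs = 10 and db_level = 2, shift = 10, so each child of
         * this indirect block covers 1024 L0 blocks.  For db_blkid = 0
         * the block spans child ids 0..1023 (dbstart..dbend).  Freeing
         * blkid = 4500, nblks = 2000000 gives start = 4500 >> 10 = 4
         * and end = 2004499 >> 10 = 1957, which is then clamped to
         * dbend = 1023; bp is advanced by start - dbstart = 4 entries.
         */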
        shift = (db->db_level - 1) * epbs;
        dbstart = db->db_blkid << epbs;
        start = blkid >> shift;
        if (dbstart < start) {
                bp += start - dbstart;
        } else {
                start = dbstart;
        }
        dbend = ((db->db_blkid + 1) << epbs) - 1;
        end = (blkid + nblks - 1) >> shift;
        if (dbend <= end)
                end = dbend;

        ASSERT3U(start, <=, end);

        if (db->db_level == 1) {
                FREE_VERIFY(db, start, end, tx);
                free_blocks(dn, bp, end-start+1, tx);
        } else {
                for (uint64_t id = start; id <= end; id++, bp++) {
                        if (BP_IS_HOLE(bp))
                                continue;
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
                            id, TRUE, FALSE, FTAG, &subdb));
                        rw_exit(&dn->dn_struct_rwlock);
                        ASSERT3P(bp, ==, subdb->db_blkptr);

                        free_children(subdb, blkid, nblks, free_indirects, tx);
                        dbuf_rele(subdb, FTAG);
                }
        }

        if (free_indirects) {
                for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
                        ASSERT(BP_IS_HOLE(bp));
                bzero(db->db.db_data, db->db.db_size);
                free_blocks(dn, db->db_blkptr, 1, tx);
        }

        DB_DNODE_EXIT(db);
        arc_buf_freeze(db->db_buf);
}
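
/*
 * Note: FREE_VERIFY() used above is expected to expand to free_verify()
 * on ZFS_DEBUG builds and to nothing otherwise (see its definition in
 * sys/dbuf.h), which is why free_verify() itself is only compiled under
 * #ifdef ZFS_DEBUG.
 */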

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
        blkptr_t *bp = dn->dn_phys->dn_blkptr;
        int dnlevel = dn->dn_phys->dn_nlevels;
        boolean_t trunc = B_FALSE;

        if (blkid > dn->dn_phys->dn_maxblkid)
                return;

        ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
        if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
                nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
                trunc = B_TRUE;
        }

        /* There are no indirect blocks in the object */
        if (dnlevel == 1) {
                if (blkid >= dn->dn_phys->dn_nblkptr) {
                        /* this range was never made persistent */
                        return;
                }
                ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
                free_blocks(dn, bp + blkid, nblks, tx);
        } else {
                int shift = (dnlevel - 1) *
                    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
                int start = blkid >> shift;
                int end = (blkid + nblks - 1) >> shift;
                dmu_buf_impl_t *db;

                ASSERT(start < dn->dn_phys->dn_nblkptr);
                bp += start;
                for (int i = start; i <= end; i++, bp++) {
                        if (BP_IS_HOLE(bp))
                                continue;
                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
                            TRUE, FALSE, FTAG, &db));
                        rw_exit(&dn->dn_struct_rwlock);

                        free_children(db, blkid, nblks, free_indirects, tx);
                        dbuf_rele(db, FTAG);
                }
        }

        if (trunc) {
                dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

                uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
                    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
                ASSERT(off < dn->dn_phys->dn_maxblkid ||
                    dn->dn_phys->dn_maxblkid == 0 ||
                    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
        }
}

typedef struct dnode_sync_free_range_arg {
        dnode_t *dsfra_dnode;
        dmu_tx_t *dsfra_tx;
        boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
        dnode_sync_free_range_arg_t *dsfra = arg;
        dnode_t *dn = dsfra->dsfra_dnode;

        /*
         * The caller (range_tree_vacate() in dnode_sync()) holds dn_mtx;
         * drop it across the actual free, which takes other locks.
         */
        mutex_exit(&dn->dn_mtx);
        dnode_sync_free_range_impl(dn, blkid, nblks,
            dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
        mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
        dmu_buf_impl_t db_marker;
        dmu_buf_impl_t *db, *db_next;

        mutex_enter(&dn->dn_dbufs_mtx);
        for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef DEBUG
                DB_DNODE_ENTER(db);
                ASSERT3P(DB_DNODE(db), ==, dn);
                DB_DNODE_EXIT(db);
#endif /* DEBUG */

                mutex_enter(&db->db_mtx);
                if (db->db_state != DB_EVICTING &&
                    zfs_refcount_is_zero(&db->db_holds)) {
                        db_marker.db_level = db->db_level;
                        db_marker.db_blkid = db->db_blkid;
                        db_marker.db_state = DB_SEARCH;
                        avl_insert_here(&dn->dn_dbufs, &db_marker, db,
                            AVL_BEFORE);

                        /*
                         * We need to use the "marker" dbuf rather than
                         * simply getting the next dbuf, because
                         * dbuf_destroy() may actually remove multiple dbufs.
                         * It can call itself recursively on the parent dbuf,
                         * which may also be removed from dn_dbufs.  The code
                         * flow would look like:
                         *
                         * dbuf_destroy():
                         *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
                         *      if (!cacheable || pending_evict)
                         *          dbuf_destroy()
                         */
                        dbuf_destroy(db);

                        db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
                        avl_remove(&dn->dn_dbufs, &db_marker);
                } else {
                        db->db_pending_evict = TRUE;
                        mutex_exit(&db->db_mtx);
                        db_next = AVL_NEXT(&dn->dn_dbufs, db);
                }
        }
        mutex_exit(&dn->dn_dbufs_mtx);

        dnode_evict_bonus(dn);
}
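
/*
 * Evict the dnode's bonus buffer if it is unreferenced; otherwise mark
 * it for eviction once the last hold is released.
 */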
void
dnode_evict_bonus(dnode_t *dn)
{
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        if (dn->dn_bonus != NULL) {
                if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
                        mutex_enter(&dn->dn_bonus->db_mtx);
                        dbuf_destroy(dn->dn_bonus);
                        dn->dn_bonus = NULL;
                } else {
                        dn->dn_bonus->db_pending_evict = TRUE;
                }
        }
        rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Recursively discard the given list of dirty records, releasing the
 * hold that each record has on its dbuf.
 */
static void
dnode_undirty_dbufs(list_t *list)
{
        dbuf_dirty_record_t *dr;

        while ((dr = list_head(list)) != NULL) {
                dmu_buf_impl_t *db = dr->dr_dbuf;
                uint64_t txg = dr->dr_txg;

                if (db->db_level != 0)
                        dnode_undirty_dbufs(&dr->dt.di.dr_children);

                mutex_enter(&db->db_mtx);
                /* XXX - use dbuf_undirty()? */
                list_remove(list, dr);
                ASSERT(db->db_last_dirty == dr);
                db->db_last_dirty = NULL;
                db->db_dirtycnt -= 1;
                if (db->db_level == 0) {
                        ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
                            dr->dt.dl.dr_data == db->db_buf);
                        dbuf_unoverride(dr);
                } else {
                        mutex_destroy(&dr->dt.di.dr_mtx);
                        list_destroy(&dr->dt.di.dr_children);
                }
                kmem_free(dr, sizeof (dbuf_dirty_record_t));
                dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
        }
}

static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
        int txgoff = tx->tx_txg & TXG_MASK;

        ASSERT(dmu_tx_is_syncing(tx));

        /*
         * Our contents should have been freed in dnode_sync() by the
         * free range record inserted by the caller of dnode_free().
         */
        ASSERT0(DN_USED_BYTES(dn->dn_phys));
        ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

        dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
        dnode_evict_dbufs(dn);

        /*
         * XXX - It would be nice to assert this, but we may still
         * have residual holds from async evictions from the arc...
         *
         * zfs_obj_to_path() also depends on this being
         * commented out.
         *
         * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
         */

        /* Undirty next bits */
        dn->dn_next_nlevels[txgoff] = 0;
        dn->dn_next_indblkshift[txgoff] = 0;
        dn->dn_next_blksz[txgoff] = 0;

        /* ASSERT(blkptrs are zero); */
        ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
        ASSERT(dn->dn_type != DMU_OT_NONE);

        ASSERT(dn->dn_free_txg > 0);
        if (dn->dn_allocated_txg != dn->dn_free_txg)
                dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
        bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
        dnode_free_interior_slots(dn);

        mutex_enter(&dn->dn_mtx);
        dn->dn_type = DMU_OT_NONE;
        dn->dn_maxblkid = 0;
        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_have_spill = B_FALSE;
        dn->dn_num_slots = 1;
        mutex_exit(&dn->dn_mtx);

        ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

        dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
        /*
         * Now that we've released our hold, the dnode may
         * be evicted, so we mustn't access it.
         */
}
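
/*
 * Rough order of operations in dnode_sync() below (summary only; the
 * code is authoritative): account user/group usage, copy the
 * open-context dnode fields into dn_phys, free the spill block if it
 * is being removed, process any freed ranges (possibly freeing the
 * entire dnode via dnode_sync_free()), activate the large-dnode
 * feature if needed, add indirection levels, apply nblkptr changes,
 * and finally sync the dirty dbufs with dbuf_sync_list().
 */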

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
        dnode_phys_t *dnp = dn->dn_phys;
        int txgoff = tx->tx_txg & TXG_MASK;
        list_t *list = &dn->dn_dirty_records[txgoff];
        static const dnode_phys_t zerodn = { 0 };
        boolean_t kill_spill = B_FALSE;

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
        ASSERT(dnp->dn_type != DMU_OT_NONE ||
            bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
        DNODE_VERIFY(dn);

        ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

        if (dmu_objset_userused_enabled(dn->dn_objset) &&
            !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                mutex_enter(&dn->dn_mtx);
                dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
                dn->dn_oldflags = dn->dn_phys->dn_flags;
                dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
                mutex_exit(&dn->dn_mtx);
                dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
        } else {
                /* Once we account for it, we should always account for it */
                ASSERT(!(dn->dn_phys->dn_flags &
                    DNODE_FLAG_USERUSED_ACCOUNTED));
        }

        mutex_enter(&dn->dn_mtx);
        if (dn->dn_allocated_txg == tx->tx_txg) {
                /* The dnode is newly allocated or reallocated */
                if (dnp->dn_type == DMU_OT_NONE) {
                        /* this is a first alloc, not a realloc */
                        dnp->dn_nlevels = 1;
                        dnp->dn_nblkptr = dn->dn_nblkptr;
                }

                dnp->dn_type = dn->dn_type;
                dnp->dn_bonustype = dn->dn_bonustype;
                dnp->dn_bonuslen = dn->dn_bonuslen;
        }

        dnp->dn_extra_slots = dn->dn_num_slots - 1;

        ASSERT(dnp->dn_nlevels > 1 ||
            BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
            BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
            dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        ASSERT(dnp->dn_nlevels < 2 ||
            BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

        if (dn->dn_next_type[txgoff] != 0) {
                dnp->dn_type = dn->dn_type;
                dn->dn_next_type[txgoff] = 0;
        }

        if (dn->dn_next_blksz[txgoff] != 0) {
                ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
                    SPA_MINBLOCKSIZE) == 0);
                ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
                    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
                    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
                    dnp->dn_datablkszsec ||
                    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
                dnp->dn_datablkszsec =
                    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
                dn->dn_next_blksz[txgoff] = 0;
        }

        if (dn->dn_next_bonuslen[txgoff] != 0) {
                if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
                        dnp->dn_bonuslen = 0;
                else
                        dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
                ASSERT(dnp->dn_bonuslen <=
                    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
                dn->dn_next_bonuslen[txgoff] = 0;
        }

        if (dn->dn_next_bonustype[txgoff] != 0) {
                ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
                dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
                dn->dn_next_bonustype[txgoff] = 0;
        }

        boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
            dn->dn_free_txg <= tx->tx_txg;

        /*
         * Remove the spill block if we have been explicitly asked to
         * remove it, or if the object is being removed.
         */
        if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
                if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
                        kill_spill = B_TRUE;
                dn->dn_rm_spillblk[txgoff] = 0;
        }

        if (dn->dn_next_indblkshift[txgoff] != 0) {
                ASSERT(dnp->dn_nlevels == 1);
                dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
                dn->dn_next_indblkshift[txgoff] = 0;
        }

        /*
         * Just take the live (open-context) values for checksum and compress.
         * Strictly speaking it's a future leak, but nothing bad happens if we
         * start using the new checksum or compress algorithm a little early.
         */
        dnp->dn_checksum = dn->dn_checksum;
        dnp->dn_compress = dn->dn_compress;
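
        /*
         * For instance (example txgs only): if open context switches
         * dn_compress while txg N is still syncing, blocks written for
         * txg N may already use the new algorithm.  That is harmless
         * because each block pointer records the checksum and compress
         * functions actually used for that block.
         */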

        mutex_exit(&dn->dn_mtx);

        if (kill_spill) {
                free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
                mutex_enter(&dn->dn_mtx);
                dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
                mutex_exit(&dn->dn_mtx);
        }

        /* process all the "freed" ranges in the file */
        if (dn->dn_free_ranges[txgoff] != NULL) {
                dnode_sync_free_range_arg_t dsfra;
                dsfra.dsfra_dnode = dn;
                dsfra.dsfra_tx = tx;
                dsfra.dsfra_free_indirects = freeing_dnode;
                if (freeing_dnode) {
                        ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
                            0, dn->dn_maxblkid + 1));
                }
                mutex_enter(&dn->dn_mtx);
                range_tree_vacate(dn->dn_free_ranges[txgoff],
                    dnode_sync_free_range, &dsfra);
                range_tree_destroy(dn->dn_free_ranges[txgoff]);
                dn->dn_free_ranges[txgoff] = NULL;
                mutex_exit(&dn->dn_mtx);
        }

        if (freeing_dnode) {
                dn->dn_objset->os_freed_dnodes++;
                dnode_sync_free(dn, tx);
                return;
        }

        if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
                dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
                mutex_enter(&ds->ds_lock);
                ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] =
                    B_TRUE;
                mutex_exit(&ds->ds_lock);
        }

        if (dn->dn_next_nlevels[txgoff]) {
                dnode_increase_indirection(dn, tx);
                dn->dn_next_nlevels[txgoff] = 0;
        }

        if (dn->dn_next_nblkptr[txgoff]) {
                /* this should only happen on a realloc */
                ASSERT(dn->dn_allocated_txg == tx->tx_txg);
                if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
                        /* zero the new blkptrs we are gaining */
                        bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
                            sizeof (blkptr_t) *
                            (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
                } else {
                        int i;
                        ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
                        /* the blkptrs we are losing better be unallocated */
                        for (i = dn->dn_next_nblkptr[txgoff];
                            i < dnp->dn_nblkptr; i++)
                                ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
                }
                mutex_enter(&dn->dn_mtx);
                dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
                dn->dn_next_nblkptr[txgoff] = 0;
                mutex_exit(&dn->dn_mtx);
        }

        dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

        if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                ASSERT3P(list_head(list), ==, NULL);
                dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
        }

        /*
         * Although we have dropped our reference to the dnode, it
         * can't be evicted until it's written, and we haven't yet
         * initiated the IO for the dnode's dbuf.
         */
}