/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>

static kmem_cache_t *dnode_cache;
/*
 * Define DNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef DEBUG
#define DNODE_STATS
#endif /* DEBUG */

#ifdef DNODE_STATS
#define DNODE_STAT_ADD(stat)    ((stat)++)
#else
#define DNODE_STAT_ADD(stat)    /* nothing */
#endif /* DNODE_STATS */

static dnode_phys_t dnode_phys_zero;

int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;

static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);

static int
dbuf_compare(const void *x1, const void *x2)
{
    const dmu_buf_impl_t *d1 = x1;
    const dmu_buf_impl_t *d2 = x2;

    if (d1->db_level < d2->db_level) {
        return (-1);
    } else if (d1->db_level > d2->db_level) {
        return (1);
    }

    if (d1->db_blkid < d2->db_blkid) {
        return (-1);
    } else if (d1->db_blkid > d2->db_blkid) {
        return (1);
    }

    /*
     * If a dbuf is being evicted while dn_dbufs_mtx is not held, we set
     * the db_state to DB_EVICTING but do not remove it from dn_dbufs. If
     * another thread creates a dbuf of the same blkid before the first is
     * removed from dn_dbufs, we can reach a state where there are two
     * dbufs of the same blkid and level in dn_dbufs. To maintain the avl
     * invariant that there cannot be duplicate items, we distinguish
     * between these two dbufs based on the time they were created.
     */
    if (d1->db_creation < d2->db_creation) {
        return (-1);
    } else if (d1->db_creation > d2->db_creation) {
        return (1);
    } else {
        ASSERT3P(d1, ==, d2);
        return (0);
    }
}
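
/*
 * kmem cache constructor and destructor for dnode_t.  Everything set up
 * here must be returned to this initial state before a dnode is freed
 * back to the cache; dnode_dest() below asserts exactly that.
 */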
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
    dnode_t *dn = arg;
    int i;

    rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
    mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);

    /*
     * Every dbuf has a reference, and dropping a tracked reference is
     * O(number of references), so don't track dn_holds.
     */
    refcount_create_untracked(&dn->dn_holds);
    refcount_create(&dn->dn_tx_holds);
    list_link_init(&dn->dn_link);

    bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
    bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
    bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
    bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
    bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
    bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
    bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));

    for (i = 0; i < TXG_SIZE; i++) {
        list_link_init(&dn->dn_dirty_link[i]);
        dn->dn_free_ranges[i] = NULL;
        list_create(&dn->dn_dirty_records[i],
            sizeof (dbuf_dirty_record_t),
            offsetof(dbuf_dirty_record_t, dr_dirty_node));
    }

    dn->dn_allocated_txg = 0;
    dn->dn_free_txg = 0;
    dn->dn_assigned_txg = 0;
    dn->dn_dirtyctx = 0;
    dn->dn_dirtyctx_firstset = NULL;
    dn->dn_bonus = NULL;
    dn->dn_have_spill = B_FALSE;
    dn->dn_zio = NULL;
    dn->dn_oldused = 0;
    dn->dn_oldflags = 0;
    dn->dn_olduid = 0;
    dn->dn_oldgid = 0;
    dn->dn_newuid = 0;
    dn->dn_newgid = 0;
    dn->dn_id_flags = 0;

    dn->dn_dbufs_count = 0;
    dn->dn_unlisted_l0_blkid = 0;
    avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
        offsetof(dmu_buf_impl_t, db_link));

    dn->dn_moved = 0;
    return (0);
}

/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
    int i;
    dnode_t *dn = arg;

    rw_destroy(&dn->dn_struct_rwlock);
    mutex_destroy(&dn->dn_mtx);
    mutex_destroy(&dn->dn_dbufs_mtx);
    cv_destroy(&dn->dn_notxholds);
    refcount_destroy(&dn->dn_holds);
    refcount_destroy(&dn->dn_tx_holds);
    ASSERT(!list_link_active(&dn->dn_link));

    for (i = 0; i < TXG_SIZE; i++) {
        ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
        ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
        list_destroy(&dn->dn_dirty_records[i]);
        ASSERT0(dn->dn_next_nblkptr[i]);
        ASSERT0(dn->dn_next_nlevels[i]);
        ASSERT0(dn->dn_next_indblkshift[i]);
        ASSERT0(dn->dn_next_bonustype[i]);
        ASSERT0(dn->dn_rm_spillblk[i]);
        ASSERT0(dn->dn_next_bonuslen[i]);
        ASSERT0(dn->dn_next_blksz[i]);
    }

    ASSERT0(dn->dn_allocated_txg);
    ASSERT0(dn->dn_free_txg);
    ASSERT0(dn->dn_assigned_txg);
    ASSERT0(dn->dn_dirtyctx);
    ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
    ASSERT3P(dn->dn_bonus, ==, NULL);
    ASSERT(!dn->dn_have_spill);
    ASSERT3P(dn->dn_zio, ==, NULL);
    ASSERT0(dn->dn_oldused);
    ASSERT0(dn->dn_oldflags);
    ASSERT0(dn->dn_olduid);
    ASSERT0(dn->dn_oldgid);
    ASSERT0(dn->dn_newuid);
    ASSERT0(dn->dn_newgid);
    ASSERT0(dn->dn_id_flags);

    ASSERT0(dn->dn_dbufs_count);
    ASSERT0(dn->dn_unlisted_l0_blkid);
    avl_destroy(&dn->dn_dbufs);
}
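
/*
 * The dnode cache is created with a move callback (dnode_move(), below)
 * so that kmem can relocate inactive dnodes to reduce fragmentation.
 */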
void
dnode_init(void)
{
    ASSERT(dnode_cache == NULL);
    dnode_cache = kmem_cache_create("dnode_t",
        sizeof (dnode_t),
        0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
    kmem_cache_set_move(dnode_cache, dnode_move);
}

void
dnode_fini(void)
{
    kmem_cache_destroy(dnode_cache);
    dnode_cache = NULL;
}


#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
    int drop_struct_lock = FALSE;

    ASSERT(dn->dn_phys);
    ASSERT(dn->dn_objset);
    ASSERT(dn->dn_handle->dnh_dnode == dn);

    ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

    if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
        return;

    if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        drop_struct_lock = TRUE;
    }
    if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
        int i;
        ASSERT3U(dn->dn_indblkshift, >=, 0);
        ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
        if (dn->dn_datablkshift) {
            ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
            ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
            ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
        }
        ASSERT3U(dn->dn_nlevels, <=, 30);
        ASSERT(DMU_OT_IS_VALID(dn->dn_type));
        ASSERT3U(dn->dn_nblkptr, >=, 1);
        ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
        ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
        ASSERT3U(dn->dn_datablksz, ==,
            dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
        ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
            dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
        for (i = 0; i < TXG_SIZE; i++) {
            ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
        }
    }
    if (dn->dn_phys->dn_type != DMU_OT_NONE)
        ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
    ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
    if (dn->dn_dbuf != NULL) {
        ASSERT3P(dn->dn_phys, ==,
            (dnode_phys_t *)dn->dn_dbuf->db.db_data +
            (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
    }
    if (drop_struct_lock)
        rw_exit(&dn->dn_struct_rwlock);
}
#endif
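
/*
 * Byte-swap a single on-disk dnode in place; dnode_buf_byteswap() below
 * applies this to each dnode in a buffer when a block of dnodes is read
 * on a host of the opposite byte order.
 */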
void
dnode_byteswap(dnode_phys_t *dnp)
{
    uint64_t *buf64 = (void*)&dnp->dn_blkptr;
    int i;

    if (dnp->dn_type == DMU_OT_NONE) {
        bzero(dnp, sizeof (dnode_phys_t));
        return;
    }

    dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
    dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
    dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
    dnp->dn_used = BSWAP_64(dnp->dn_used);

    /*
     * dn_nblkptr is only one byte, so it's OK to read it in either
     * byte order.  We can't read dn_bonuslen.
     */
    ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
    ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
    for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
        buf64[i] = BSWAP_64(buf64[i]);

    /*
     * OK to check dn_bonuslen for zero, because it won't matter if
     * we have the wrong byte order.  This is necessary because the
     * dnode dnode is smaller than a regular dnode.
     */
    if (dnp->dn_bonuslen != 0) {
        /*
         * Note that the bonus length calculated here may be
         * longer than the actual bonus buffer.  This is because
         * we always put the bonus buffer after the last block
         * pointer (instead of packing it against the end of the
         * dnode buffer).
         */
        int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
        size_t len = DN_MAX_BONUSLEN - off;
        ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
        dmu_object_byteswap_t byteswap =
            DMU_OT_BYTESWAP(dnp->dn_bonustype);
        dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
    }

    /* Swap SPILL block if we have one */
    if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
        byteswap_uint64_array(&dnp->dn_spill, sizeof (blkptr_t));
}

void
dnode_buf_byteswap(void *vbuf, size_t size)
{
    dnode_phys_t *buf = vbuf;
    int i;

    ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
    ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);

    size >>= DNODE_SHIFT;
    for (i = 0; i < size; i++) {
        dnode_byteswap(buf);
        buf++;
    }
}

void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
    ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

    dnode_setdirty(dn, tx);
    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
    ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
        (dn->dn_nblkptr-1) * sizeof (blkptr_t));
    dn->dn_bonuslen = newsize;
    if (newsize == 0)
        dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
    else
        dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
    rw_exit(&dn->dn_struct_rwlock);
}

void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
    ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
    dnode_setdirty(dn, tx);
    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
    dn->dn_bonustype = newtype;
    dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
    rw_exit(&dn->dn_struct_rwlock);
}

void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
    ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
    ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
    dnode_setdirty(dn, tx);
    dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
    dn->dn_have_spill = B_FALSE;
}

static void
dnode_setdblksz(dnode_t *dn, int size)
{
    ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
    ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
    ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
    ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
        1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
    dn->dn_datablksz = size;
    dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
    dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
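
/*
 * Construct an in-core dnode for the on-disk dnode dnp and link it onto
 * the objset's list of known dnodes.  dn_objset is assigned last, under
 * os_lock, since a valid objset pointer is what makes a dnode visible to
 * dnode_move().
 */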
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object, dnode_handle_t *dnh)
{
    dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);

    ASSERT(!POINTER_IS_VALID(dn->dn_objset));
    dn->dn_moved = 0;

    /*
     * Defer setting dn_objset until the dnode is ready to be a candidate
     * for the dnode_move() callback.
     */
    dn->dn_object = object;
    dn->dn_dbuf = db;
    dn->dn_handle = dnh;
    dn->dn_phys = dnp;

    if (dnp->dn_datablkszsec) {
        dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
    } else {
        dn->dn_datablksz = 0;
        dn->dn_datablkszsec = 0;
        dn->dn_datablkshift = 0;
    }
    dn->dn_indblkshift = dnp->dn_indblkshift;
    dn->dn_nlevels = dnp->dn_nlevels;
    dn->dn_type = dnp->dn_type;
    dn->dn_nblkptr = dnp->dn_nblkptr;
    dn->dn_checksum = dnp->dn_checksum;
    dn->dn_compress = dnp->dn_compress;
    dn->dn_bonustype = dnp->dn_bonustype;
    dn->dn_bonuslen = dnp->dn_bonuslen;
    dn->dn_maxblkid = dnp->dn_maxblkid;
    dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
    dn->dn_id_flags = 0;

    dmu_zfetch_init(&dn->dn_zfetch, dn);

    ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

    mutex_enter(&os->os_lock);
    list_insert_head(&os->os_dnodes, dn);
    membar_producer();
    /*
     * Everything else must be valid before assigning dn_objset makes the
     * dnode eligible for dnode_move().
     */
    dn->dn_objset = os;
    mutex_exit(&os->os_lock);

    arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
    return (dn);
}

/*
 * Caller must be holding the dnode handle, which is released upon return.
 */
static void
dnode_destroy(dnode_t *dn)
{
    objset_t *os = dn->dn_objset;

    ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);

    mutex_enter(&os->os_lock);
    POINTER_INVALIDATE(&dn->dn_objset);
    list_remove(&os->os_dnodes, dn);
    mutex_exit(&os->os_lock);

    /* the dnode can no longer move, so we can release the handle */
    zrl_remove(&dn->dn_handle->dnh_zrlock);

    dn->dn_allocated_txg = 0;
    dn->dn_free_txg = 0;
    dn->dn_assigned_txg = 0;

    dn->dn_dirtyctx = 0;
    if (dn->dn_dirtyctx_firstset != NULL) {
        kmem_free(dn->dn_dirtyctx_firstset, 1);
        dn->dn_dirtyctx_firstset = NULL;
    }
    if (dn->dn_bonus != NULL) {
        mutex_enter(&dn->dn_bonus->db_mtx);
        dbuf_evict(dn->dn_bonus);
        dn->dn_bonus = NULL;
    }
    dn->dn_zio = NULL;

    dn->dn_have_spill = B_FALSE;
    dn->dn_oldused = 0;
    dn->dn_oldflags = 0;
    dn->dn_olduid = 0;
    dn->dn_oldgid = 0;
    dn->dn_newuid = 0;
    dn->dn_newgid = 0;
    dn->dn_id_flags = 0;
    dn->dn_unlisted_l0_blkid = 0;

    dmu_zfetch_rele(&dn->dn_zfetch);
    kmem_cache_free(dnode_cache, dn);
    arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
}
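
/*
 * Initialize a freshly held, free dnode as a new object of type ot.
 * A blocksize of zero selects the default (1 << zfs_default_bs); other
 * sizes are rounded up to a multiple of SPA_MINBLOCKSIZE and capped at
 * SPA_MAXBLOCKSIZE.  An ibs of zero selects the default indirect block
 * shift; otherwise ibs is clamped to the supported range.
 */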
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
    int i;

    if (blocksize == 0)
        blocksize = 1 << zfs_default_bs;
    else if (blocksize > SPA_MAXBLOCKSIZE)
        blocksize = SPA_MAXBLOCKSIZE;
    else
        blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

    if (ibs == 0)
        ibs = zfs_default_ibs;

    ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

    dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
        dn->dn_object, tx->tx_txg, blocksize, ibs);

    ASSERT(dn->dn_type == DMU_OT_NONE);
    ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
    ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
    ASSERT(ot != DMU_OT_NONE);
    ASSERT(DMU_OT_IS_VALID(ot));
    ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
        (bonustype == DMU_OT_SA && bonuslen == 0) ||
        (bonustype != DMU_OT_NONE && bonuslen != 0));
    ASSERT(DMU_OT_IS_VALID(bonustype));
    ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
    ASSERT(dn->dn_type == DMU_OT_NONE);
    ASSERT0(dn->dn_maxblkid);
    ASSERT0(dn->dn_allocated_txg);
    ASSERT0(dn->dn_assigned_txg);
    ASSERT(refcount_is_zero(&dn->dn_tx_holds));
    ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
    ASSERT(avl_is_empty(&dn->dn_dbufs));

    for (i = 0; i < TXG_SIZE; i++) {
        ASSERT0(dn->dn_next_nblkptr[i]);
        ASSERT0(dn->dn_next_nlevels[i]);
        ASSERT0(dn->dn_next_indblkshift[i]);
        ASSERT0(dn->dn_next_bonuslen[i]);
        ASSERT0(dn->dn_next_bonustype[i]);
        ASSERT0(dn->dn_rm_spillblk[i]);
        ASSERT0(dn->dn_next_blksz[i]);
        ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
        ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
        ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
    }

    dn->dn_type = ot;
    dnode_setdblksz(dn, blocksize);
    dn->dn_indblkshift = ibs;
    dn->dn_nlevels = 1;
    if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
        dn->dn_nblkptr = 1;
    else
        dn->dn_nblkptr = 1 +
            ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
    dn->dn_bonustype = bonustype;
    dn->dn_bonuslen = bonuslen;
    dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
    dn->dn_compress = ZIO_COMPRESS_INHERIT;
    dn->dn_dirtyctx = 0;

    dn->dn_free_txg = 0;
    if (dn->dn_dirtyctx_firstset) {
        kmem_free(dn->dn_dirtyctx_firstset, 1);
        dn->dn_dirtyctx_firstset = NULL;
    }

    dn->dn_allocated_txg = tx->tx_txg;
    dn->dn_id_flags = 0;

    dnode_setdirty(dn, tx);
    dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
    dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
    dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
    dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
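
/*
 * Re-use an existing object number for a new object with a possibly
 * different type and bonus layout.  Unreferenced dbufs are evicted first;
 * changing the blocksize additionally requires that no data blocks be
 * allocated or dirty.
 */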
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
    int nblkptr;

    ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
    ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
    ASSERT0(blocksize % SPA_MINBLOCKSIZE);
    ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
    ASSERT(tx->tx_txg != 0);
    ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
        (bonustype != DMU_OT_NONE && bonuslen != 0) ||
        (bonustype == DMU_OT_SA && bonuslen == 0));
    ASSERT(DMU_OT_IS_VALID(bonustype));
    ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

    /* clean up any unreferenced dbufs */
    dnode_evict_dbufs(dn);

    dn->dn_id_flags = 0;

    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
    dnode_setdirty(dn, tx);
    if (dn->dn_datablksz != blocksize) {
        /* change blocksize */
        ASSERT(dn->dn_maxblkid == 0 &&
            (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
            dnode_block_freed(dn, 0)));
        dnode_setdblksz(dn, blocksize);
        dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
    }
    if (dn->dn_bonuslen != bonuslen)
        dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;

    if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
        nblkptr = 1;
    else
        nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
    if (dn->dn_bonustype != bonustype)
        dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
    if (dn->dn_nblkptr != nblkptr)
        dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
    if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
        dbuf_rm_spill(dn, tx);
        dnode_rm_spill(dn, tx);
    }
    rw_exit(&dn->dn_struct_rwlock);

    /* change type */
    dn->dn_type = ot;

    /* change bonus size and type */
    mutex_enter(&dn->dn_mtx);
    dn->dn_bonustype = bonustype;
    dn->dn_bonuslen = bonuslen;
    dn->dn_nblkptr = nblkptr;
    dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
    dn->dn_compress = ZIO_COMPRESS_INHERIT;
    ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

    /* fix up the bonus db_size */
    if (dn->dn_bonus) {
        dn->dn_bonus->db.db_size =
            DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
        ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
    }

    dn->dn_allocated_txg = tx->tx_txg;
    mutex_exit(&dn->dn_mtx);
}

#ifdef DNODE_STATS
static struct {
    uint64_t dms_dnode_invalid;
    uint64_t dms_dnode_recheck1;
    uint64_t dms_dnode_recheck2;
    uint64_t dms_dnode_special;
    uint64_t dms_dnode_handle;
    uint64_t dms_dnode_rwlock;
    uint64_t dms_dnode_active;
} dnode_move_stats;
#endif /* DNODE_STATS */
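
/*
 * Copy the contents of the source dnode into the destination and then
 * invalidate the source.  The caller, dnode_move(), guarantees that no
 * locks are held on the source dnode and that it has no active tx holds.
 */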
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
    int i;

    ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
    ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
    ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
    ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));

    /* Copy fields. */
    ndn->dn_objset = odn->dn_objset;
    ndn->dn_object = odn->dn_object;
    ndn->dn_dbuf = odn->dn_dbuf;
    ndn->dn_handle = odn->dn_handle;
    ndn->dn_phys = odn->dn_phys;
    ndn->dn_type = odn->dn_type;
    ndn->dn_bonuslen = odn->dn_bonuslen;
    ndn->dn_bonustype = odn->dn_bonustype;
    ndn->dn_nblkptr = odn->dn_nblkptr;
    ndn->dn_checksum = odn->dn_checksum;
    ndn->dn_compress = odn->dn_compress;
    ndn->dn_nlevels = odn->dn_nlevels;
    ndn->dn_indblkshift = odn->dn_indblkshift;
    ndn->dn_datablkshift = odn->dn_datablkshift;
    ndn->dn_datablkszsec = odn->dn_datablkszsec;
    ndn->dn_datablksz = odn->dn_datablksz;
    ndn->dn_maxblkid = odn->dn_maxblkid;
    bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
        sizeof (odn->dn_next_nblkptr));
    bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
        sizeof (odn->dn_next_nlevels));
    bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
        sizeof (odn->dn_next_indblkshift));
    bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
        sizeof (odn->dn_next_bonustype));
    bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
        sizeof (odn->dn_rm_spillblk));
    bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
        sizeof (odn->dn_next_bonuslen));
    bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
        sizeof (odn->dn_next_blksz));
    for (i = 0; i < TXG_SIZE; i++) {
        list_move_tail(&ndn->dn_dirty_records[i],
            &odn->dn_dirty_records[i]);
    }
    bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
        sizeof (odn->dn_free_ranges));
    ndn->dn_allocated_txg = odn->dn_allocated_txg;
    ndn->dn_free_txg = odn->dn_free_txg;
    ndn->dn_assigned_txg = odn->dn_assigned_txg;
    ndn->dn_dirtyctx = odn->dn_dirtyctx;
    ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
    ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
    refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
    ASSERT(avl_is_empty(&ndn->dn_dbufs));
    avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
    ndn->dn_dbufs_count = odn->dn_dbufs_count;
    ndn->dn_unlisted_l0_blkid = odn->dn_unlisted_l0_blkid;
    ndn->dn_bonus = odn->dn_bonus;
    ndn->dn_have_spill = odn->dn_have_spill;
    ndn->dn_zio = odn->dn_zio;
    ndn->dn_oldused = odn->dn_oldused;
    ndn->dn_oldflags = odn->dn_oldflags;
    ndn->dn_olduid = odn->dn_olduid;
    ndn->dn_oldgid = odn->dn_oldgid;
    ndn->dn_newuid = odn->dn_newuid;
    ndn->dn_newgid = odn->dn_newgid;
    ndn->dn_id_flags = odn->dn_id_flags;
    dmu_zfetch_init(&ndn->dn_zfetch, NULL);
    list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
    ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
    ndn->dn_zfetch.zf_stream_cnt = odn->dn_zfetch.zf_stream_cnt;
    ndn->dn_zfetch.zf_alloc_fail = odn->dn_zfetch.zf_alloc_fail;

    /*
     * Update back pointers. Updating the handle fixes the back pointer of
     * every descendant dbuf as well as the bonus dbuf.
     */
    ASSERT(ndn->dn_handle->dnh_dnode == odn);
    ndn->dn_handle->dnh_dnode = ndn;
    if (ndn->dn_zfetch.zf_dnode == odn) {
        ndn->dn_zfetch.zf_dnode = ndn;
    }

    /*
     * Invalidate the original dnode by clearing all of its back pointers.
     */
    odn->dn_dbuf = NULL;
    odn->dn_handle = NULL;
    avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
        offsetof(dmu_buf_impl_t, db_link));
    odn->dn_dbufs_count = 0;
    odn->dn_unlisted_l0_blkid = 0;
    odn->dn_bonus = NULL;
    odn->dn_zfetch.zf_dnode = NULL;

    /*
     * Set the low bit of the objset pointer to ensure that dnode_move()
     * recognizes the dnode as invalid in any subsequent callback.
     */
    POINTER_INVALIDATE(&odn->dn_objset);

    /*
     * Satisfy the destructor.
     */
    for (i = 0; i < TXG_SIZE; i++) {
        list_create(&odn->dn_dirty_records[i],
            sizeof (dbuf_dirty_record_t),
            offsetof(dbuf_dirty_record_t, dr_dirty_node));
        odn->dn_free_ranges[i] = NULL;
        odn->dn_next_nlevels[i] = 0;
        odn->dn_next_indblkshift[i] = 0;
        odn->dn_next_bonustype[i] = 0;
        odn->dn_rm_spillblk[i] = 0;
        odn->dn_next_bonuslen[i] = 0;
        odn->dn_next_blksz[i] = 0;
    }
    odn->dn_allocated_txg = 0;
    odn->dn_free_txg = 0;
    odn->dn_assigned_txg = 0;
    odn->dn_dirtyctx = 0;
    odn->dn_dirtyctx_firstset = NULL;
    odn->dn_have_spill = B_FALSE;
    odn->dn_zio = NULL;
    odn->dn_oldused = 0;
    odn->dn_oldflags = 0;
    odn->dn_olduid = 0;
    odn->dn_oldgid = 0;
    odn->dn_newuid = 0;
    odn->dn_newgid = 0;
    odn->dn_id_flags = 0;

    /*
     * Mark the dnode.
     */
    ndn->dn_moved = 1;
    odn->dn_moved = (uint8_t)-1;
}
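
/*
 * kmem move callback for the dnode cache.  Returns KMEM_CBRC_YES only if
 * the dnode is provably idle: KMEM_CBRC_DONT_KNOW if the buffer no longer
 * looks like a known dnode, KMEM_CBRC_NO for special (parentless) dnodes,
 * and KMEM_CBRC_LATER when the locks cannot be acquired without waiting
 * or the dnode has active holds beyond those of its dbufs.
 */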
#ifdef _KERNEL
/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
    dnode_t *odn = buf, *ndn = newbuf;
    objset_t *os;
    int64_t refcount;
    uint32_t dbufs;

    /*
     * The dnode is on the objset's list of known dnodes if the objset
     * pointer is valid. We set the low bit of the objset pointer when
     * freeing the dnode to invalidate it, and the memory patterns written
     * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
     * A newly created dnode sets the objset pointer last of all to indicate
     * that the dnode is known and in a valid state to be moved by this
     * function.
     */
    os = odn->dn_objset;
    if (!POINTER_IS_VALID(os)) {
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_invalid);
        return (KMEM_CBRC_DONT_KNOW);
    }

    /*
     * Ensure that the objset does not go away during the move.
     */
    rw_enter(&os_lock, RW_WRITER);
    if (os != odn->dn_objset) {
        rw_exit(&os_lock);
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck1);
        return (KMEM_CBRC_DONT_KNOW);
    }

    /*
     * If the dnode is still valid, then so is the objset. We know that no
     * valid objset can be freed while we hold os_lock, so we can safely
     * ensure that the objset remains in use.
     */
    mutex_enter(&os->os_lock);

    /*
     * Recheck the objset pointer in case the dnode was removed just before
     * acquiring the lock.
     */
    if (os != odn->dn_objset) {
        mutex_exit(&os->os_lock);
        rw_exit(&os_lock);
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck2);
        return (KMEM_CBRC_DONT_KNOW);
    }

    /*
     * At this point we know that as long as we hold os->os_lock, the dnode
     * cannot be freed and fields within the dnode can be safely accessed.
     * The objset listing this dnode cannot go away as long as this dnode is
     * on its list.
     */
    rw_exit(&os_lock);
    if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
        mutex_exit(&os->os_lock);
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_special);
        return (KMEM_CBRC_NO);
    }
    ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */

    /*
     * Lock the dnode handle to prevent the dnode from obtaining any new
     * holds. This also prevents the descendant dbufs and the bonus dbuf
     * from accessing the dnode, so that we can discount their holds. The
     * handle is safe to access because we know that while the dnode cannot
     * go away, neither can its handle. Once we hold dnh_zrlock, we can
     * safely move any dnode referenced only by dbufs.
     */
    if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
        mutex_exit(&os->os_lock);
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_handle);
        return (KMEM_CBRC_LATER);
    }

    /*
     * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
     * We need to guarantee that there is a hold for every dbuf in order to
     * determine whether the dnode is actively referenced. Falsely matching
     * a dbuf to an active hold would lead to an unsafe move. It's possible
     * that a thread already having an active dnode hold is about to add a
     * dbuf, and we can't compare hold and dbuf counts while the add is in
     * progress.
     */
    if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
        zrl_exit(&odn->dn_handle->dnh_zrlock);
        mutex_exit(&os->os_lock);
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_rwlock);
        return (KMEM_CBRC_LATER);
    }

    /*
     * A dbuf may be removed (evicted) without an active dnode hold. In that
     * case, the dbuf count is decremented under the handle lock before the
     * dbuf's hold is released. This order ensures that if we count the hold
     * after the dbuf is removed but before its hold is released, we will
     * treat the unmatched hold as active and exit safely. If we count the
     * hold before the dbuf is removed, the hold is discounted, and the
     * removal is blocked until the move completes.
     */
    refcount = refcount_count(&odn->dn_holds);
    ASSERT(refcount >= 0);
    dbufs = odn->dn_dbufs_count;

    /* We can't have more dbufs than dnode holds. */
    ASSERT3U(dbufs, <=, refcount);
    DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
        uint32_t, dbufs);

    if (refcount > dbufs) {
        rw_exit(&odn->dn_struct_rwlock);
        zrl_exit(&odn->dn_handle->dnh_zrlock);
        mutex_exit(&os->os_lock);
        DNODE_STAT_ADD(dnode_move_stats.dms_dnode_active);
        return (KMEM_CBRC_LATER);
    }

    rw_exit(&odn->dn_struct_rwlock);

    /*
     * At this point we know that anyone with a hold on the dnode is not
     * actively referencing it. The dnode is known and in a valid state to
     * move. We're holding the locks needed to execute the critical section.
     */
    dnode_move_impl(odn, ndn);

    list_link_replace(&odn->dn_link, &ndn->dn_link);
    /* If the dnode was safe to move, the refcount cannot have changed. */
    ASSERT(refcount == refcount_count(&ndn->dn_holds));
    ASSERT(dbufs == ndn->dn_dbufs_count);
    zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
    mutex_exit(&os->os_lock);

    return (KMEM_CBRC_YES);
}
#endif /* _KERNEL */

void
dnode_special_close(dnode_handle_t *dnh)
{
    dnode_t *dn = dnh->dnh_dnode;

    /*
     * Wait for final references to the dnode to clear. This can
     * only happen if the arc is asynchronously evicting state that
     * has a hold on this dnode while we are trying to evict this
     * dnode.
     */
    while (refcount_count(&dn->dn_holds) > 0)
        delay(1);
    zrl_add(&dnh->dnh_zrlock);
    dnode_destroy(dn); /* implicit zrl_remove() */
    zrl_destroy(&dnh->dnh_zrlock);
    dnh->dnh_dnode = NULL;
}

dnode_t *
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
    dnode_handle_t *dnh)
{
    dnode_t *dn = dnode_create(os, dnp, NULL, object, dnh);
    dnh->dnh_dnode = dn;
    zrl_init(&dnh->dnh_zrlock);
    DNODE_VERIFY(dn);
    return (dn);
}
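
/*
 * Eviction callback for a dbuf full of dnodes: destroy the remaining
 * in-core child dnodes and their handles.  No child can still be held
 * here, since a hold on a child dnode also holds the containing dbuf,
 * which would have made it ineligible for eviction.
 */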
static void
dnode_buf_pageout(dmu_buf_t *db, void *arg)
{
    dnode_children_t *children_dnodes = arg;
    int i;
    int epb = db->db_size >> DNODE_SHIFT;

    ASSERT(epb == children_dnodes->dnc_count);

    for (i = 0; i < epb; i++) {
        dnode_handle_t *dnh = &children_dnodes->dnc_children[i];
        dnode_t *dn;

        /*
         * The dnode handle lock guards against the dnode moving to
         * another valid address, so there is no need here to guard
         * against changes to or from NULL.
         */
        if (dnh->dnh_dnode == NULL) {
            zrl_destroy(&dnh->dnh_zrlock);
            continue;
        }

        zrl_add(&dnh->dnh_zrlock);
        dn = dnh->dnh_dnode;
        /*
         * If there are holds on this dnode, then there should
         * be holds on the dnode's containing dbuf as well; thus
         * it wouldn't be eligible for eviction and this function
         * would not have been called.
         */
        ASSERT(refcount_is_zero(&dn->dn_holds));
        ASSERT(refcount_is_zero(&dn->dn_tx_holds));

        dnode_destroy(dn); /* implicit zrl_remove() */
        zrl_destroy(&dnh->dnh_zrlock);
        dnh->dnh_dnode = NULL;
    }
    kmem_free(children_dnodes, sizeof (dnode_children_t) +
        epb * sizeof (dnode_handle_t));
}

/*
 * errors:
 * EINVAL - invalid object number.
 * ENOENT - the object is free (and DNODE_MUST_BE_ALLOCATED was passed).
 * EEXIST - the object is allocated (and DNODE_MUST_BE_FREE was passed).
 * EIO - i/o error.
 * succeeds even for free dnodes (when DNODE_MUST_BE_FREE is passed).
 */
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
    int epb, idx, err;
    int drop_struct_lock = FALSE;
    int type;
    uint64_t blk;
    dnode_t *mdn, *dn;
    dmu_buf_impl_t *db;
    dnode_children_t *children_dnodes;
    dnode_handle_t *dnh;

    /*
     * If you are holding the spa config lock as writer, you shouldn't
     * be asking the DMU to do *anything* unless it's the root pool,
     * which may require us to read from the root filesystem while
     * holding some (not all) of the locks as writer.
     */
    ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
        (spa_is_root(os->os_spa) &&
        spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));

    if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
        dn = (object == DMU_USERUSED_OBJECT) ?
            DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
        if (dn == NULL)
            return (SET_ERROR(ENOENT));
        type = dn->dn_type;
        if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
            return (SET_ERROR(ENOENT));
        if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
            return (SET_ERROR(EEXIST));
        DNODE_VERIFY(dn);
        (void) refcount_add(&dn->dn_holds, tag);
        *dnp = dn;
        return (0);
    }

    if (object == 0 || object >= DN_MAX_OBJECT)
        return (SET_ERROR(EINVAL));

    mdn = DMU_META_DNODE(os);
    ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);

    DNODE_VERIFY(mdn);

    if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
        rw_enter(&mdn->dn_struct_rwlock, RW_READER);
        drop_struct_lock = TRUE;
    }

    blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));

    db = dbuf_hold(mdn, blk, FTAG);
    if (drop_struct_lock)
        rw_exit(&mdn->dn_struct_rwlock);
    if (db == NULL)
        return (SET_ERROR(EIO));
    err = dbuf_read(db, NULL, DB_RF_CANFAIL);
    if (err) {
        dbuf_rele(db, FTAG);
        return (err);
    }

    ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
    epb = db->db.db_size >> DNODE_SHIFT;

    idx = object & (epb-1);

    ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
    children_dnodes = dmu_buf_get_user(&db->db);
    if (children_dnodes == NULL) {
        int i;
        dnode_children_t *winner;
        children_dnodes = kmem_alloc(sizeof (dnode_children_t) +
            epb * sizeof (dnode_handle_t), KM_SLEEP);
        children_dnodes->dnc_count = epb;
        dnh = &children_dnodes->dnc_children[0];
        for (i = 0; i < epb; i++) {
            zrl_init(&dnh[i].dnh_zrlock);
            dnh[i].dnh_dnode = NULL;
        }
        if ((winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
            dnode_buf_pageout)) != NULL) {
            for (i = 0; i < epb; i++) {
                zrl_destroy(&dnh[i].dnh_zrlock);
            }

            kmem_free(children_dnodes, sizeof (dnode_children_t) +
                epb * sizeof (dnode_handle_t));
            children_dnodes = winner;
        }
    }
    ASSERT(children_dnodes->dnc_count == epb);

    dnh = &children_dnodes->dnc_children[idx];
    zrl_add(&dnh->dnh_zrlock);
    if ((dn = dnh->dnh_dnode) == NULL) {
        dnode_phys_t *phys = (dnode_phys_t *)db->db.db_data+idx;
        dnode_t *winner;

        dn = dnode_create(os, phys, db, object, dnh);
        winner = atomic_cas_ptr(&dnh->dnh_dnode, NULL, dn);
        if (winner != NULL) {
            zrl_add(&dnh->dnh_zrlock);
            dnode_destroy(dn); /* implicit zrl_remove() */
            dn = winner;
        }
    }

    mutex_enter(&dn->dn_mtx);
    type = dn->dn_type;
    if (dn->dn_free_txg ||
        ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
        ((flag & DNODE_MUST_BE_FREE) &&
        (type != DMU_OT_NONE || !refcount_is_zero(&dn->dn_holds)))) {
        mutex_exit(&dn->dn_mtx);
        zrl_remove(&dnh->dnh_zrlock);
        dbuf_rele(db, FTAG);
        return (type == DMU_OT_NONE ? ENOENT : EEXIST);
    }
    mutex_exit(&dn->dn_mtx);

    if (refcount_add(&dn->dn_holds, tag) == 1)
        dbuf_add_ref(db, dnh);
    /* Now we can rely on the hold to prevent the dnode from moving. */
    zrl_remove(&dnh->dnh_zrlock);

    DNODE_VERIFY(dn);
    ASSERT3P(dn->dn_dbuf, ==, db);
    ASSERT3U(dn->dn_object, ==, object);
    dbuf_rele(db, FTAG);

    *dnp = dn;
    return (0);
}

/*
 * Return a held dnode in *dnp if the object is allocated, or an error if not.
 */
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
    return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}

/*
 * Can only add a reference if there is already at least one
 * reference on the dnode.  Returns FALSE if unable to add a
 * new reference.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
    mutex_enter(&dn->dn_mtx);
    if (refcount_is_zero(&dn->dn_holds)) {
        mutex_exit(&dn->dn_mtx);
        return (FALSE);
    }
    VERIFY(1 < refcount_add(&dn->dn_holds, tag));
    mutex_exit(&dn->dn_mtx);
    return (TRUE);
}
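
/*
 * Release a hold on the dnode.  When the last hold is dropped, the dnode
 * also drops the hold it keeps on its containing dbuf (special dnodes
 * have no containing dbuf).
 */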
void
dnode_rele(dnode_t *dn, void *tag)
{
    uint64_t refs;
    /* Get while the hold prevents the dnode from moving. */
    dmu_buf_impl_t *db = dn->dn_dbuf;
    dnode_handle_t *dnh = dn->dn_handle;

    mutex_enter(&dn->dn_mtx);
    refs = refcount_remove(&dn->dn_holds, tag);
    mutex_exit(&dn->dn_mtx);

    /*
     * It's unsafe to release the last hold on a dnode by dnode_rele() or
     * indirectly by dbuf_rele() while relying on the dnode handle to
     * prevent the dnode from moving, since releasing the last hold could
     * result in the dnode's parent dbuf evicting its dnode handles. For
     * that reason anyone calling dnode_rele() or dbuf_rele() without some
     * other direct or indirect hold on the dnode must first drop the dnode
     * handle.
     */
    ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);

    /* NOTE: the DNODE_DNODE does not have a dn_dbuf */
    if (refs == 0 && db != NULL) {
        /*
         * Another thread could add a hold to the dnode handle in
         * dnode_hold_impl() while holding the parent dbuf. Since the
         * hold on the parent dbuf prevents the handle from being
         * destroyed, the hold on the handle is OK. We can't yet assert
         * that the handle has zero references, but that will be
         * asserted anyway when the handle gets destroyed.
         */
        dbuf_rele(db, dnh);
    }
}
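
/*
 * Mark the dnode dirty in this transaction: put it on the objset's dirty
 * (or free) dnode list for the txg and take a "dirty hold" that keeps the
 * dnode around until its children have been processed in syncing context.
 */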
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
    objset_t *os = dn->dn_objset;
    uint64_t txg = tx->tx_txg;

    if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
        dsl_dataset_dirty(os->os_dsl_dataset, tx);
        return;
    }

    DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
    mutex_enter(&dn->dn_mtx);
    ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
    ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
    mutex_exit(&dn->dn_mtx);
#endif

    /*
     * Determine old uid/gid when necessary
     */
    dmu_objset_userquota_get_ids(dn, B_TRUE, tx);

    mutex_enter(&os->os_lock);

    /*
     * If we are already marked dirty, we're done.
     */
    if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
        mutex_exit(&os->os_lock);
        return;
    }

    ASSERT(!refcount_is_zero(&dn->dn_holds) ||
        !avl_is_empty(&dn->dn_dbufs));
    ASSERT(dn->dn_datablksz != 0);
    ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
    ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
    ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);

    dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
        dn->dn_object, txg);

    if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
        list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
    } else {
        list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
    }

    mutex_exit(&os->os_lock);

    /*
     * The dnode maintains a hold on its containing dbuf as
     * long as there are holds on it.  Each instantiated child
     * dbuf maintains a hold on the dnode.  When the last child
     * drops its hold, the dnode will drop its hold on the
     * containing dbuf. We add a "dirty hold" here so that the
     * dnode will hang around after we finish processing its
     * children.
     */
    VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

    (void) dbuf_dirty(dn->dn_dbuf, tx);

    dsl_dataset_dirty(os->os_dsl_dataset, tx);
}

void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
    int txgoff = tx->tx_txg & TXG_MASK;

    dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

    /* we should be the only holder... hopefully */
    /* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

    mutex_enter(&dn->dn_mtx);
    if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
        mutex_exit(&dn->dn_mtx);
        return;
    }
    dn->dn_free_txg = tx->tx_txg;
    mutex_exit(&dn->dn_mtx);

    /*
     * If the dnode is already dirty, it needs to be moved from
     * the dirty list to the free list.
     */
    mutex_enter(&dn->dn_objset->os_lock);
    if (list_link_active(&dn->dn_dirty_link[txgoff])) {
        list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
        list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
        mutex_exit(&dn->dn_objset->os_lock);
    } else {
        mutex_exit(&dn->dn_objset->os_lock);
        dnode_setdirty(dn, tx);
    }
}

/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
    dmu_buf_impl_t *db;
    int err;

    if (size == 0)
        size = SPA_MINBLOCKSIZE;
    if (size > SPA_MAXBLOCKSIZE)
        size = SPA_MAXBLOCKSIZE;
    else
        size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

    if (ibs == dn->dn_indblkshift)
        ibs = 0;

    if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
        return (0);

    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

    /* Check for any allocated blocks beyond the first */
    if (dn->dn_maxblkid != 0)
        goto fail;

    mutex_enter(&dn->dn_dbufs_mtx);
    for (db = avl_first(&dn->dn_dbufs); db != NULL;
        db = AVL_NEXT(&dn->dn_dbufs, db)) {
        if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
            db->db_blkid != DMU_SPILL_BLKID) {
            mutex_exit(&dn->dn_dbufs_mtx);
            goto fail;
        }
    }
    mutex_exit(&dn->dn_dbufs_mtx);

    if (ibs && dn->dn_nlevels != 1)
        goto fail;

    /* resize the old block */
    err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db);
    if (err == 0)
        dbuf_new_size(db, size, tx);
    else if (err != ENOENT)
        goto fail;

    dnode_setdblksz(dn, size);
    dnode_setdirty(dn, tx);
    dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
    if (ibs) {
        dn->dn_indblkshift = ibs;
        dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
    }
    /* rele after we have fixed the blocksize in the dnode */
    if (db)
        dbuf_rele(db, FTAG);

    rw_exit(&dn->dn_struct_rwlock);
    return (0);

fail:
    rw_exit(&dn->dn_struct_rwlock);
    return (SET_ERROR(ENOTSUP));
}
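
/*
 * Raise dn_maxblkid to cover blkid, growing the number of indirection
 * levels if necessary and migrating existing dirty records under the new
 * top-level indirect block.
 */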
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
{
    uint64_t txgoff = tx->tx_txg & TXG_MASK;
    int epbs, new_nlevels;
    uint64_t sz;

    ASSERT(blkid != DMU_BONUS_BLKID);

    ASSERT(have_read ?
        RW_READ_HELD(&dn->dn_struct_rwlock) :
        RW_WRITE_HELD(&dn->dn_struct_rwlock));

    /*
     * if we have a read-lock, check to see if we need to do any work
     * before upgrading to a write-lock.
     */
    if (have_read) {
        if (blkid <= dn->dn_maxblkid)
            return;

        if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
            rw_exit(&dn->dn_struct_rwlock);
            rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        }
    }

    if (blkid <= dn->dn_maxblkid)
        goto out;

    dn->dn_maxblkid = blkid;

    /*
     * Compute the number of levels necessary to support the new maxblkid.
     */
    new_nlevels = 1;
    epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
    for (sz = dn->dn_nblkptr;
        sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
        new_nlevels++;

    if (new_nlevels > dn->dn_nlevels) {
        int old_nlevels = dn->dn_nlevels;
        dmu_buf_impl_t *db;
        list_t *list;
        dbuf_dirty_record_t *new, *dr, *dr_next;

        dn->dn_nlevels = new_nlevels;

        ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
        dn->dn_next_nlevels[txgoff] = new_nlevels;

        /* dirty the left indirects */
        db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
        ASSERT(db != NULL);
        new = dbuf_dirty(db, tx);
        dbuf_rele(db, FTAG);

        /* transfer the dirty records to the new indirect */
        mutex_enter(&dn->dn_mtx);
        mutex_enter(&new->dt.di.dr_mtx);
        list = &dn->dn_dirty_records[txgoff];
        for (dr = list_head(list); dr; dr = dr_next) {
            dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
            if (dr->dr_dbuf->db_level != new_nlevels-1 &&
                dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
                dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
                ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
                list_remove(&dn->dn_dirty_records[txgoff], dr);
                list_insert_tail(&new->dt.di.dr_children, dr);
                dr->dr_parent = new;
            }
        }
        mutex_exit(&new->dt.di.dr_mtx);
        mutex_exit(&dn->dn_mtx);
    }

out:
    if (have_read)
        rw_downgrade(&dn->dn_struct_rwlock);
}
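
/*
 * Free the given range of the object's data, where len == DMU_OBJECT_END
 * means free everything from off onward.  Partial blocks at the edges of
 * the range are zeroed here; whole blocks are recorded in dn_free_ranges
 * and actually freed in the syncing phase.
 */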
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
    dmu_buf_impl_t *db;
    uint64_t blkoff, blkid, nblks;
    int blksz, blkshift, head, tail;
    int trunc = FALSE;
    int epbs;

    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
    blksz = dn->dn_datablksz;
    blkshift = dn->dn_datablkshift;
    epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

    if (len == DMU_OBJECT_END) {
        len = UINT64_MAX - off;
        trunc = TRUE;
    }

    /*
     * First, block align the region to free:
     */
    if (ISP2(blksz)) {
        head = P2NPHASE(off, blksz);
        blkoff = P2PHASE(off, blksz);
        if ((off >> blkshift) > dn->dn_maxblkid)
            goto out;
    } else {
        ASSERT(dn->dn_maxblkid == 0);
        if (off == 0 && len >= blksz) {
            /*
             * Freeing the whole block; fast-track this request.
             * Note that we won't dirty any indirect blocks,
             * which is fine because we will be freeing the entire
             * file and thus all indirect blocks will be freed
             * by free_children().
             */
            blkid = 0;
            nblks = 1;
            goto done;
        } else if (off >= blksz) {
            /* Freeing past end-of-data */
            goto out;
        } else {
            /* Freeing part of the block. */
            head = blksz - off;
            ASSERT3U(head, >, 0);
        }
        blkoff = off;
    }
    /* zero out any partial block data at the start of the range */
    if (head) {
        ASSERT3U(blkoff + head, ==, blksz);
        if (len < head)
            head = len;
        if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
            FTAG, &db) == 0) {
            caddr_t data;

            /* don't dirty if it isn't on disk and isn't dirty */
            if (db->db_last_dirty ||
                (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
                rw_exit(&dn->dn_struct_rwlock);
                dmu_buf_will_dirty(&db->db, tx);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                data = db->db.db_data;
                bzero(data + blkoff, head);
            }
            dbuf_rele(db, FTAG);
        }
        off += head;
        len -= head;
    }

    /* If the range was less than one block, we're done */
    if (len == 0)
        goto out;

    /* If the remaining range is past end of file, we're done */
    if ((off >> blkshift) > dn->dn_maxblkid)
        goto out;

    ASSERT(ISP2(blksz));
    if (trunc)
        tail = 0;
    else
        tail = P2PHASE(len, blksz);

    ASSERT0(P2PHASE(off, blksz));
    /* zero out any partial block data at the end of the range */
    if (tail) {
        if (len < tail)
            tail = len;
        if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
            TRUE, FTAG, &db) == 0) {
            /* don't dirty if not on disk and not dirty */
            if (db->db_last_dirty ||
                (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
                rw_exit(&dn->dn_struct_rwlock);
                dmu_buf_will_dirty(&db->db, tx);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                bzero(db->db.db_data, tail);
            }
            dbuf_rele(db, FTAG);
        }
        len -= tail;
    }

    /* If the range did not include a full block, we are done */
    if (len == 0)
        goto out;

    ASSERT(IS_P2ALIGNED(off, blksz));
    ASSERT(trunc || IS_P2ALIGNED(len, blksz));
    blkid = off >> blkshift;
    nblks = len >> blkshift;
    if (trunc)
        nblks += 1;

    /*
     * Dirty the first and last indirect blocks, as they (and/or their
     * parents) will need to be written out if they were only
     * partially freed.  Interior indirect blocks will themselves be
     * freed, by free_children(), so they need not be dirtied.  Note that
     * these interior blocks have already been prefetched by
     * dmu_tx_hold_free().
     */
    if (dn->dn_nlevels > 1) {
        uint64_t first, last;

        first = blkid >> epbs;
        if ((db = dbuf_hold_level(dn, 1, first, FTAG)) != NULL) {
            dmu_buf_will_dirty(&db->db, tx);
            dbuf_rele(db, FTAG);
        }
        if (trunc)
            last = dn->dn_maxblkid >> epbs;
        else
            last = (blkid + nblks - 1) >> epbs;
        if (last > first &&
            (db = dbuf_hold_level(dn, 1, last, FTAG)) != NULL) {
            dmu_buf_will_dirty(&db->db, tx);
            dbuf_rele(db, FTAG);
        }
    }

done:
    /*
     * Add this range to the dnode range list.
     * We will finish up this free operation in the syncing phase.
     */
    mutex_enter(&dn->dn_mtx);
    int txgoff = tx->tx_txg & TXG_MASK;
    if (dn->dn_free_ranges[txgoff] == NULL) {
        dn->dn_free_ranges[txgoff] =
            range_tree_create(NULL, NULL, &dn->dn_mtx);
    }
    range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
    range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
    dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
        blkid, nblks, tx->tx_txg);
    mutex_exit(&dn->dn_mtx);

    dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
    dnode_setdirty(dn, tx);
out:
    rw_exit(&dn->dn_struct_rwlock);
}
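
/*
 * return TRUE if this dnode's spill block was freed in a recent txg,
 * or FALSE if it wasn't
 */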
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
    int i;

    mutex_enter(&dn->dn_mtx);
    for (i = 0; i < TXG_SIZE; i++) {
        if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
            break;
    }
    mutex_exit(&dn->dn_mtx);
    return (i < TXG_SIZE);
}

/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
    void *dp = spa_get_dsl(dn->dn_objset->os_spa);
    int i;

    if (blkid == DMU_BONUS_BLKID)
        return (FALSE);

    /*
     * If we're in the process of opening the pool, dp will not be
     * set yet, but there shouldn't be anything dirty.
     */
    if (dp == NULL)
        return (FALSE);

    if (dn->dn_free_txg)
        return (TRUE);

    if (blkid == DMU_SPILL_BLKID)
        return (dnode_spill_freed(dn));

    mutex_enter(&dn->dn_mtx);
    for (i = 0; i < TXG_SIZE; i++) {
        if (dn->dn_free_ranges[i] != NULL &&
            range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
            break;
    }
    mutex_exit(&dn->dn_mtx);
    return (i < TXG_SIZE);
}

/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
    uint64_t space;
    dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
        dn, dn->dn_phys,
        (u_longlong_t)dn->dn_phys->dn_used,
        (longlong_t)delta);

    mutex_enter(&dn->dn_mtx);
    space = DN_USED_BYTES(dn->dn_phys);
    if (delta > 0) {
        ASSERT3U(space + delta, >=, space); /* no overflow */
    } else {
        ASSERT3U(space, >=, -delta); /* no underflow */
    }
    space += delta;
    if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
        ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
        ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
        dn->dn_phys->dn_used = space >> DEV_BSHIFT;
    } else {
        dn->dn_phys->dn_used = space;
        dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
    }
    mutex_exit(&dn->dn_mtx);
}

/*
 * Call when we think we're going to write/free space in open context to track
 * the amount of memory in use by the currently open txg.
 */
void
dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
{
    objset_t *os = dn->dn_objset;
    dsl_dataset_t *ds = os->os_dsl_dataset;
    int64_t aspace = spa_get_asize(os->os_spa, space);

    if (ds != NULL) {
        dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
        dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
    }

    dmu_tx_willuse_space(tx, aspace);
}

/*
 * Scans a block at the indicated "level" looking for a hole or data,
 * depending on 'flags'.
 *
 * If level > 0, then we are scanning an indirect block looking at its
 * pointers.  If level == 0, then we are looking at a block of dnodes.
 *
 * If we don't find what we are looking for in the block, we return ESRCH.
 * Otherwise, return with *offset pointing to the beginning (if searching
 * forwards) or end (if searching backwards) of the range covered by the
 * block pointer we matched on (or dnode).
 *
 * The basic search algorithm used below by dnode_next_offset() is to
 * use this function to search up the block tree (widen the search) until
 * we find something (i.e., we don't return ESRCH) and then search back
 * down the tree (narrow the search) until we reach our original search
 * level.
 */
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
    int lvl, uint64_t blkfill, uint64_t txg)
{
    dmu_buf_impl_t *db = NULL;
    void *data = NULL;
    uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
    uint64_t epb = 1ULL << epbs;
    uint64_t minfill, maxfill;
    boolean_t hole;
    int i, inc, error, span;

    dprintf("probing object %llu offset %llx level %d of %u\n",
        dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);

    hole = ((flags & DNODE_FIND_HOLE) != 0);
    inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
    ASSERT(txg == 0 || !hole);

    if (lvl == dn->dn_phys->dn_nlevels) {
        error = 0;
        epb = dn->dn_phys->dn_nblkptr;
        data = dn->dn_phys->dn_blkptr;
    } else {
        uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl);
        error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db);
        if (error) {
            if (error != ENOENT)
                return (error);
            if (hole)
                return (0);
            /*
             * This can only happen when we are searching up
             * the block tree for data.  We don't really need to
             * adjust the offset, as we will just end up looking
             * at the pointer to this block in its parent, and it's
             * going to be unallocated, so we will skip over it.
             */
            return (SET_ERROR(ESRCH));
        }
        error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
        if (error) {
            dbuf_rele(db, FTAG);
            return (error);
        }
        data = db->db.db_data;
    }

    if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
        db->db_blkptr->blk_birth <= txg ||
        BP_IS_HOLE(db->db_blkptr))) {
        /*
         * This can only happen when we are searching up the tree
         * and these conditions mean that we need to keep climbing.
         */
        error = SET_ERROR(ESRCH);
    } else if (lvl == 0) {
        dnode_phys_t *dnp = data;
        span = DNODE_SHIFT;
        ASSERT(dn->dn_type == DMU_OT_DNODE);

        for (i = (*offset >> span) & (blkfill - 1);
            i >= 0 && i < blkfill; i += inc) {
            if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
                break;
            *offset += (1ULL << span) * inc;
        }
        if (i < 0 || i == blkfill)
            error = SET_ERROR(ESRCH);
    } else {
        blkptr_t *bp = data;
        uint64_t start = *offset;
        span = (lvl - 1) * epbs + dn->dn_datablkshift;
        minfill = 0;
        maxfill = blkfill << ((lvl - 1) * epbs);

        if (hole)
            maxfill--;
        else
            minfill++;

        *offset = *offset >> span;
        for (i = BF64_GET(*offset, 0, epbs);
            i >= 0 && i < epb; i += inc) {
            if (BP_GET_FILL(&bp[i]) >= minfill &&
                BP_GET_FILL(&bp[i]) <= maxfill &&
                (hole || bp[i].blk_birth > txg))
                break;
            if (inc > 0 || *offset > 0)
                *offset += inc;
        }
        *offset = *offset << span;
        if (inc < 0) {
            /* traversing backwards; position offset at the end */
            ASSERT3U(*offset, <=, start);
            *offset = MIN(*offset + (1ULL << span) - 1, start);
        } else if (*offset < start) {
            *offset = start;
        }
        if (i < 0 || i >= epb)
            error = SET_ERROR(ESRCH);
    }

    if (db)
        dbuf_rele(db, FTAG);

    return (error);
}

/*
 * Find the next hole, data, or sparse region at or after *offset.
 * The value 'blkfill' tells us how many items we expect to find
 * in an L0 data block; this value is 1 for normal objects,
 * DNODES_PER_BLOCK for the meta dnode, and some fraction of
 * DNODES_PER_BLOCK when searching for sparse regions thereof.
 *
 * Examples:
 *
 * dnode_next_offset(dn, flags, offset, 1, 1, 0);
 *     Finds the next/previous hole/data in a file.
 *     Used in dmu_offset_next().
 *
 * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
 *     Finds the next free/allocated dnode in an objset's meta-dnode.
 *     Only finds objects that have new contents since txg (ie.
 *     bonus buffer changes and content removal are ignored).
 *     Used in dmu_object_next().
 *
 * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
 *     Finds the next L2 meta-dnode bp that's at most 1/4 full.
 *     Used in dmu_object_alloc().
 */
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
    int minlvl, uint64_t blkfill, uint64_t txg)
{
    uint64_t initial_offset = *offset;
    int lvl, maxlvl;
    int error = 0;

    if (!(flags & DNODE_FIND_HAVELOCK))
        rw_enter(&dn->dn_struct_rwlock, RW_READER);

    if (dn->dn_phys->dn_nlevels == 0) {
        error = SET_ERROR(ESRCH);
        goto out;
    }

    if (dn->dn_datablkshift == 0) {
        if (*offset < dn->dn_datablksz) {
            if (flags & DNODE_FIND_HOLE)
                *offset = dn->dn_datablksz;
        } else {
            error = SET_ERROR(ESRCH);
        }
        goto out;
    }

    maxlvl = dn->dn_phys->dn_nlevels;

    for (lvl = minlvl; lvl <= maxlvl; lvl++) {
        error = dnode_next_offset_level(dn,
            flags, offset, lvl, blkfill, txg);
        if (error != ESRCH)
            break;
    }

    while (error == 0 && --lvl >= minlvl) {
        error = dnode_next_offset_level(dn,
            flags, offset, lvl, blkfill, txg);
    }

    if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
        initial_offset < *offset : initial_offset > *offset))
        error = SET_ERROR(ESRCH);
out:
    if (!(flags & DNODE_FIND_HAVELOCK))
        rw_exit(&dn->dn_struct_rwlock);

    return (error);
}