1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 * Copyright 2017 RackTop Systems.
27 */
28
29 #include <sys/disp.h>
30 #include <sys/zfs_context.h>
31 #include <sys/dbuf.h>
32 #include <sys/dnode.h>
33 #include <sys/dmu.h>
34 #include <sys/dmu_impl.h>
35 #include <sys/dmu_tx.h>
36 #include <sys/dmu_objset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/spa.h>
40 #include <sys/zio.h>
41 #include <sys/dmu_zfetch.h>
42 #include <sys/range_tree.h>
43 #include <sys/zfs_project.h>
44
45 dnode_stats_t dnode_stats = {
46 { "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
47 { "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
48 { "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
49 { "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
50 { "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
51 { "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
52 { "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
53 { "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
54 { "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
55 { "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
56 { "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
57 { "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
58 { "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
59 { "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
60 { "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
61 { "dnode_allocate", KSTAT_DATA_UINT64 },
62 { "dnode_reallocate", KSTAT_DATA_UINT64 },
63 { "dnode_buf_evict", KSTAT_DATA_UINT64 },
64 { "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
65 { "dnode_alloc_race", KSTAT_DATA_UINT64 },
66 { "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
67 { "dnode_move_invalid", KSTAT_DATA_UINT64 },
68 { "dnode_move_recheck1", KSTAT_DATA_UINT64 },
69 { "dnode_move_recheck2", KSTAT_DATA_UINT64 },
70 { "dnode_move_special", KSTAT_DATA_UINT64 },
71 { "dnode_move_handle", KSTAT_DATA_UINT64 },
72 { "dnode_move_rwlock", KSTAT_DATA_UINT64 },
73 { "dnode_move_active", KSTAT_DATA_UINT64 },
74 };
75
76 static kstat_t *dnode_ksp;
77 static kmem_cache_t *dnode_cache;
78
79 static dnode_phys_t dnode_phys_zero;
80
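/*
 * Defaults used by dnode_allocate() when the caller passes 0: the data
 * block shift (blocksize) and the indirect block shift, respectively.
 */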
81 int zfs_default_bs = SPA_MINBLOCKSHIFT;
82 int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
83
84 #ifdef _KERNEL
85 static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
86 #endif /* _KERNEL */
87
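/*
 * Comparator for the per-dnode dn_dbufs AVL tree: sort by level, then by
 * block id.  A DB_SEARCH sentinel sorts before any real dbuf with the same
 * level and block id, and remaining ties fall back to pointer order.
 */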
88 static int
89 dbuf_compare(const void *x1, const void *x2)
90 {
91 const dmu_buf_impl_t *d1 = x1;
92 const dmu_buf_impl_t *d2 = x2;
93
94 int cmp = TREE_CMP(d1->db_level, d2->db_level);
95 if (likely(cmp))
96 return (cmp);
97
98 cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
99 if (likely(cmp))
100 return (cmp);
101
102 if (d1->db_state == DB_SEARCH) {
103 ASSERT3S(d2->db_state, !=, DB_SEARCH);
104 return (-1);
105 } else if (d2->db_state == DB_SEARCH) {
106 ASSERT3S(d1->db_state, !=, DB_SEARCH);
107 return (1);
108 }
109
110 return (TREE_PCMP(d1, d2));
111 }
112
113 /* ARGSUSED */
114 static int
115 dnode_cons(void *arg, void *unused, int kmflag)
116 {
117 dnode_t *dn = arg;
118 int i;
119
120 rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
121 mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
122 mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
123 cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
124 cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
125
126 /*
127 * Every dbuf has a reference, and dropping a tracked reference is
128 * O(number of references), so don't track dn_holds.
129 */
130 zfs_refcount_create_untracked(&dn->dn_holds);
131 zfs_refcount_create(&dn->dn_tx_holds);
132 list_link_init(&dn->dn_link);
133
134 bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
135 bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
136 bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
137 bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
138 bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
139 bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
140 bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
141 bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
142
143 for (i = 0; i < TXG_SIZE; i++) {
144 multilist_link_init(&dn->dn_dirty_link[i]);
145 dn->dn_free_ranges[i] = NULL;
146 list_create(&dn->dn_dirty_records[i],
147 sizeof (dbuf_dirty_record_t),
148 offsetof(dbuf_dirty_record_t, dr_dirty_node));
149 }
150
151 dn->dn_allocated_txg = 0;
152 dn->dn_free_txg = 0;
153 dn->dn_assigned_txg = 0;
154 dn->dn_dirty_txg = 0;
155 dn->dn_dirtyctx = 0;
156 dn->dn_dirtyctx_firstset = NULL;
157 dn->dn_bonus = NULL;
158 dn->dn_have_spill = B_FALSE;
159 dn->dn_zio = NULL;
160 dn->dn_oldused = 0;
161 dn->dn_oldflags = 0;
162 dn->dn_olduid = 0;
163 dn->dn_oldgid = 0;
164 dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
165 dn->dn_newuid = 0;
166 dn->dn_newgid = 0;
167 dn->dn_newprojid = ZFS_DEFAULT_PROJID;
168 dn->dn_id_flags = 0;
169
170 dn->dn_dbufs_count = 0;
171 avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
172 offsetof(dmu_buf_impl_t, db_link));
173
174 dn->dn_moved = 0;
175 return (0);
176 }
177
178 /* ARGSUSED */
179 static void
180 dnode_dest(void *arg, void *unused)
181 {
182 int i;
183 dnode_t *dn = arg;
184
185 rw_destroy(&dn->dn_struct_rwlock);
186 mutex_destroy(&dn->dn_mtx);
187 mutex_destroy(&dn->dn_dbufs_mtx);
188 cv_destroy(&dn->dn_notxholds);
189 cv_destroy(&dn->dn_nodnholds);
190 zfs_refcount_destroy(&dn->dn_holds);
191 zfs_refcount_destroy(&dn->dn_tx_holds);
192 ASSERT(!list_link_active(&dn->dn_link));
193
194 for (i = 0; i < TXG_SIZE; i++) {
195 ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
196 ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
197 list_destroy(&dn->dn_dirty_records[i]);
198 ASSERT0(dn->dn_next_nblkptr[i]);
199 ASSERT0(dn->dn_next_nlevels[i]);
200 ASSERT0(dn->dn_next_indblkshift[i]);
201 ASSERT0(dn->dn_next_bonustype[i]);
202 ASSERT0(dn->dn_rm_spillblk[i]);
203 ASSERT0(dn->dn_next_bonuslen[i]);
204 ASSERT0(dn->dn_next_blksz[i]);
205 ASSERT0(dn->dn_next_maxblkid[i]);
206 }
207
208 ASSERT0(dn->dn_allocated_txg);
209 ASSERT0(dn->dn_free_txg);
210 ASSERT0(dn->dn_assigned_txg);
211 ASSERT0(dn->dn_dirty_txg);
212 ASSERT0(dn->dn_dirtyctx);
213 ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
214 ASSERT3P(dn->dn_bonus, ==, NULL);
215 ASSERT(!dn->dn_have_spill);
216 ASSERT3P(dn->dn_zio, ==, NULL);
217 ASSERT0(dn->dn_oldused);
218 ASSERT0(dn->dn_oldflags);
219 ASSERT0(dn->dn_olduid);
220 ASSERT0(dn->dn_oldgid);
221 ASSERT0(dn->dn_oldprojid);
222 ASSERT0(dn->dn_newuid);
223 ASSERT0(dn->dn_newgid);
224 ASSERT0(dn->dn_newprojid);
225 ASSERT0(dn->dn_id_flags);
226
227 ASSERT0(dn->dn_dbufs_count);
228 avl_destroy(&dn->dn_dbufs);
229 }
230
231 void
232 dnode_init(void)
233 {
234 ASSERT(dnode_cache == NULL);
235 dnode_cache = kmem_cache_create("dnode_t",
236 sizeof (dnode_t),
237 0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
238 #ifdef _KERNEL
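	/*
	 * Register the move callback so the kmem slab allocator can
	 * relocate inactive dnodes (see dnode_move()) to reduce
	 * fragmentation.
	 */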
239 kmem_cache_set_move(dnode_cache, dnode_move);
240
241 dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
242 KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
243 KSTAT_FLAG_VIRTUAL);
244 if (dnode_ksp != NULL) {
245 dnode_ksp->ks_data = &dnode_stats;
246 kstat_install(dnode_ksp);
247 }
248 #endif /* _KERNEL */
249 }
250
251 void
252 dnode_fini(void)
253 {
254 if (dnode_ksp != NULL) {
255 kstat_delete(dnode_ksp);
256 dnode_ksp = NULL;
257 }
258
259 kmem_cache_destroy(dnode_cache);
260 dnode_cache = NULL;
261 }
262
263
264 #ifdef ZFS_DEBUG
265 void
266 dnode_verify(dnode_t *dn)
267 {
268 int drop_struct_lock = FALSE;
269
270 ASSERT(dn->dn_phys);
271 ASSERT(dn->dn_objset);
272 ASSERT(dn->dn_handle->dnh_dnode == dn);
273
274 ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
275
276 if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
277 return;
278
279 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
280 rw_enter(&dn->dn_struct_rwlock, RW_READER);
281 drop_struct_lock = TRUE;
282 }
283 if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
284 int i;
285 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
286 ASSERT3U(dn->dn_indblkshift, >=, 0);
287 ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
288 if (dn->dn_datablkshift) {
289 ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
290 ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
291 ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
292 }
293 ASSERT3U(dn->dn_nlevels, <=, 30);
294 ASSERT(DMU_OT_IS_VALID(dn->dn_type));
295 ASSERT3U(dn->dn_nblkptr, >=, 1);
296 ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
297 ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
298 ASSERT3U(dn->dn_datablksz, ==,
299 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
300 ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
301 ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
302 dn->dn_bonuslen, <=, max_bonuslen);
303 for (i = 0; i < TXG_SIZE; i++) {
304 ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
305 }
306 }
307 if (dn->dn_phys->dn_type != DMU_OT_NONE)
308 ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
309 ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
310 if (dn->dn_dbuf != NULL) {
311 ASSERT3P(dn->dn_phys, ==,
312 (dnode_phys_t *)dn->dn_dbuf->db.db_data +
313 (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
314 }
315 if (drop_struct_lock)
316 rw_exit(&dn->dn_struct_rwlock);
317 }
318 #endif
319
320 void
321 dnode_byteswap(dnode_phys_t *dnp)
322 {
323 uint64_t *buf64 = (void*)&dnp->dn_blkptr;
324 int i;
325
326 if (dnp->dn_type == DMU_OT_NONE) {
327 bzero(dnp, sizeof (dnode_phys_t));
328 return;
329 }
330
331 dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
332 dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
333 dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
334 dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
335 dnp->dn_used = BSWAP_64(dnp->dn_used);
336
337 /*
338 * dn_nblkptr is only one byte, so it's OK to read it in either
339 	 * byte order. We can't read dn_bonuslen.
340 */
341 ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
342 ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
343 for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
344 buf64[i] = BSWAP_64(buf64[i]);
345
346 /*
347 * OK to check dn_bonuslen for zero, because it won't matter if
348 * we have the wrong byte order. This is necessary because the
349 * dnode dnode is smaller than a regular dnode.
350 */
351 if (dnp->dn_bonuslen != 0) {
352 dmu_object_byteswap_t byteswap;
353 ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
354 byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
355 dmu_ot_byteswap[byteswap].ob_func(DN_BONUS(dnp),
356 DN_MAX_BONUS_LEN(dnp));
357 }
358
359 /* Swap SPILL block if we have one */
360 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
361 byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
362
363 }
364
365 void
366 dnode_buf_byteswap(void *vbuf, size_t size)
367 {
368 int i = 0;
369
370 ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
371 ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
372
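	/*
	 * Dnodes are variable length: an allocated dnode occupies
	 * 1 + dn_extra_slots slots of DNODE_MIN_SIZE each, so advance
	 * by that many slots.
	 */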
373 while (i < size) {
374 dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
375 dnode_byteswap(dnp);
376
377 i += DNODE_MIN_SIZE;
378 if (dnp->dn_type != DMU_OT_NONE)
379 i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
380 }
381 }
382
383 void
384 dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
385 {
386 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
387
388 dnode_setdirty(dn, tx);
389 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
390 ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
391 (dn->dn_nblkptr-1) * sizeof (blkptr_t));
392 dn->dn_bonuslen = newsize;
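	/*
	 * Record the new length in the per-txg array consumed by
	 * dnode_sync(); DN_ZERO_BONUSLEN distinguishes "set to zero"
	 * from "unchanged" (0).
	 */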
393 if (newsize == 0)
394 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
395 else
396 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
397 rw_exit(&dn->dn_struct_rwlock);
398 }
399
400 void
401 dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
402 {
403 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
404 dnode_setdirty(dn, tx);
405 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
406 dn->dn_bonustype = newtype;
407 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
408 rw_exit(&dn->dn_struct_rwlock);
409 }
410
411 void
412 dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
413 {
414 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
415 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
416 dnode_setdirty(dn, tx);
417 dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
418 dn->dn_have_spill = B_FALSE;
419 }
420
421 static void
422 dnode_setdblksz(dnode_t *dn, int size)
423 {
424 ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
425 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
426 ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
427 ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
428 1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
429 dn->dn_datablksz = size;
430 dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
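	/*
	 * For power-of-two sizes, highbit64(size - 1) is log2(size); a
	 * shift of 0 marks a non-power-of-two block size.
	 */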
431 dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
432 }
433
434 static dnode_t *
435 dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
436 uint64_t object, dnode_handle_t *dnh)
437 {
438 dnode_t *dn;
439
440 dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
441 #ifdef _KERNEL
442 ASSERT(!POINTER_IS_VALID(dn->dn_objset));
443 #endif /* _KERNEL */
444 dn->dn_moved = 0;
445
446 /*
447 * Defer setting dn_objset until the dnode is ready to be a candidate
448 * for the dnode_move() callback.
449 */
450 dn->dn_object = object;
451 dn->dn_dbuf = db;
452 dn->dn_handle = dnh;
453 dn->dn_phys = dnp;
454
455 if (dnp->dn_datablkszsec) {
456 dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
457 } else {
458 dn->dn_datablksz = 0;
459 dn->dn_datablkszsec = 0;
460 dn->dn_datablkshift = 0;
461 }
462 dn->dn_indblkshift = dnp->dn_indblkshift;
463 dn->dn_nlevels = dnp->dn_nlevels;
464 dn->dn_type = dnp->dn_type;
465 dn->dn_nblkptr = dnp->dn_nblkptr;
466 dn->dn_checksum = dnp->dn_checksum;
467 dn->dn_compress = dnp->dn_compress;
468 dn->dn_bonustype = dnp->dn_bonustype;
469 dn->dn_bonuslen = dnp->dn_bonuslen;
470 dn->dn_num_slots = dnp->dn_extra_slots + 1;
471 dn->dn_maxblkid = dnp->dn_maxblkid;
472 dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
473 dn->dn_id_flags = 0;
474
475 dmu_zfetch_init(&dn->dn_zfetch, dn);
476
477 ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
478 ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
479 ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
480
481 mutex_enter(&os->os_lock);
482
483 /*
484 * Exclude special dnodes from os_dnodes so an empty os_dnodes
485 * signifies that the special dnodes have no references from
486 * their children (the entries in os_dnodes). This allows
487 * dnode_destroy() to easily determine if the last child has
488 * been removed and then complete eviction of the objset.
489 */
490 if (!DMU_OBJECT_IS_SPECIAL(object))
491 list_insert_head(&os->os_dnodes, dn);
492 membar_producer();
493
494 /*
495 * Everything else must be valid before assigning dn_objset
496 * makes the dnode eligible for dnode_move().
497 */
498 dn->dn_objset = os;
499
500 dnh->dnh_dnode = dn;
501 mutex_exit(&os->os_lock);
502
503 arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
504
505 return (dn);
506 }
507
508 /*
509 * Caller must be holding the dnode handle, which is released upon return.
510 */
511 static void
512 dnode_destroy(dnode_t *dn)
513 {
514 objset_t *os = dn->dn_objset;
515 boolean_t complete_os_eviction = B_FALSE;
516
517 ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
518
519 mutex_enter(&os->os_lock);
520 POINTER_INVALIDATE(&dn->dn_objset);
521 if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
522 list_remove(&os->os_dnodes, dn);
523 complete_os_eviction =
524 list_is_empty(&os->os_dnodes) &&
525 list_link_active(&os->os_evicting_node);
526 }
527 mutex_exit(&os->os_lock);
528
529 /* the dnode can no longer move, so we can release the handle */
530 if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
531 zrl_remove(&dn->dn_handle->dnh_zrlock);
532
533 dn->dn_allocated_txg = 0;
534 dn->dn_free_txg = 0;
535 dn->dn_assigned_txg = 0;
536 dn->dn_dirty_txg = 0;
537
538 dn->dn_dirtyctx = 0;
539 if (dn->dn_dirtyctx_firstset != NULL) {
540 kmem_free(dn->dn_dirtyctx_firstset, 1);
541 dn->dn_dirtyctx_firstset = NULL;
542 }
543 if (dn->dn_bonus != NULL) {
544 mutex_enter(&dn->dn_bonus->db_mtx);
545 dbuf_destroy(dn->dn_bonus);
546 dn->dn_bonus = NULL;
547 }
548 dn->dn_zio = NULL;
549
550 dn->dn_have_spill = B_FALSE;
551 dn->dn_oldused = 0;
552 dn->dn_oldflags = 0;
553 dn->dn_olduid = 0;
554 dn->dn_oldgid = 0;
555 dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
556 dn->dn_newuid = 0;
557 dn->dn_newgid = 0;
558 dn->dn_newprojid = ZFS_DEFAULT_PROJID;
559 dn->dn_id_flags = 0;
560
561 dmu_zfetch_fini(&dn->dn_zfetch);
562 kmem_cache_free(dnode_cache, dn);
563 arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
564
565 if (complete_os_eviction)
566 dmu_objset_evict_done(os);
567 }
568
569 void
570 dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
571 dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
572 {
573 int i;
574
575 ASSERT3U(dn_slots, >, 0);
576 ASSERT3U(dn_slots << DNODE_SHIFT, <=,
577 spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
578 ASSERT3U(blocksize, <=,
579 spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
580 if (blocksize == 0)
581 blocksize = 1 << zfs_default_bs;
582 else
583 blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
584
585 if (ibs == 0)
586 ibs = zfs_default_ibs;
587
588 ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
589
590 dprintf("os=%p obj=%" PRIu64 " txg=%" PRIu64
591 " blocksize=%d ibs=%d dn_slots=%d\n",
592 dn->dn_objset, dn->dn_object, tx->tx_txg, blocksize, ibs, dn_slots);
593 DNODE_STAT_BUMP(dnode_allocate);
594
595 ASSERT(dn->dn_type == DMU_OT_NONE);
596 ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
597 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
598 ASSERT(ot != DMU_OT_NONE);
599 ASSERT(DMU_OT_IS_VALID(ot));
600 ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
601 (bonustype == DMU_OT_SA && bonuslen == 0) ||
602 (bonustype != DMU_OT_NONE && bonuslen != 0));
603 ASSERT(DMU_OT_IS_VALID(bonustype));
604 ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
605 ASSERT(dn->dn_type == DMU_OT_NONE);
606 ASSERT0(dn->dn_maxblkid);
607 ASSERT0(dn->dn_allocated_txg);
608 ASSERT0(dn->dn_dirty_txg);
609 ASSERT0(dn->dn_assigned_txg);
610 ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
611 ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
612 ASSERT(avl_is_empty(&dn->dn_dbufs));
613
614 for (i = 0; i < TXG_SIZE; i++) {
615 ASSERT0(dn->dn_next_nblkptr[i]);
616 ASSERT0(dn->dn_next_nlevels[i]);
617 ASSERT0(dn->dn_next_indblkshift[i]);
618 ASSERT0(dn->dn_next_bonuslen[i]);
619 ASSERT0(dn->dn_next_bonustype[i]);
620 ASSERT0(dn->dn_rm_spillblk[i]);
621 ASSERT0(dn->dn_next_blksz[i]);
622 ASSERT0(dn->dn_next_maxblkid[i]);
623 ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
624 ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
625 ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
626 }
627
628 dn->dn_type = ot;
629 dnode_setdblksz(dn, blocksize);
630 dn->dn_indblkshift = ibs;
631 dn->dn_nlevels = 1;
632 dn->dn_num_slots = dn_slots;
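	/*
	 * The bonus buffer and the block pointer array share the tail of
	 * the dnode, so a larger bonus buffer leaves room for fewer
	 * block pointers.
	 */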
633 if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
634 dn->dn_nblkptr = 1;
635 else {
636 dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
637 1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
638 SPA_BLKPTRSHIFT));
639 }
640
641 dn->dn_bonustype = bonustype;
642 dn->dn_bonuslen = bonuslen;
643 dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
644 dn->dn_compress = ZIO_COMPRESS_INHERIT;
645 dn->dn_dirtyctx = 0;
646
647 dn->dn_free_txg = 0;
648 if (dn->dn_dirtyctx_firstset) {
649 kmem_free(dn->dn_dirtyctx_firstset, 1);
650 dn->dn_dirtyctx_firstset = NULL;
651 }
652
653 dn->dn_allocated_txg = tx->tx_txg;
654 dn->dn_id_flags = 0;
655
656 dnode_setdirty(dn, tx);
657 dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
658 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
659 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
660 dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
661 }
662
663 void
664 dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
665 dmu_object_type_t bonustype, int bonuslen, int dn_slots,
666 boolean_t keep_spill, dmu_tx_t *tx)
667 {
668 int nblkptr;
669
670 ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
671 ASSERT3U(blocksize, <=,
672 spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
673 ASSERT0(blocksize % SPA_MINBLOCKSIZE);
674 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
675 ASSERT(tx->tx_txg != 0);
676 ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
677 (bonustype != DMU_OT_NONE && bonuslen != 0) ||
678 (bonustype == DMU_OT_SA && bonuslen == 0));
679 ASSERT(DMU_OT_IS_VALID(bonustype));
680 ASSERT3U(bonuslen, <=,
681 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
682 ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
683
684 dnode_free_interior_slots(dn);
685 DNODE_STAT_BUMP(dnode_reallocate);
686
687 /* clean up any unreferenced dbufs */
688 dnode_evict_dbufs(dn);
689
690 dn->dn_id_flags = 0;
691
692 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
693 dnode_setdirty(dn, tx);
694 if (dn->dn_datablksz != blocksize) {
695 /* change blocksize */
696 ASSERT(dn->dn_maxblkid == 0 &&
697 (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
698 dnode_block_freed(dn, 0)));
699 dnode_setdblksz(dn, blocksize);
700 dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
701 }
702 if (dn->dn_bonuslen != bonuslen)
703 dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
704
705 if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
706 nblkptr = 1;
707 else
708 nblkptr = MIN(DN_MAX_NBLKPTR,
709 1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
710 SPA_BLKPTRSHIFT));
711 if (dn->dn_bonustype != bonustype)
712 dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
713 if (dn->dn_nblkptr != nblkptr)
714 dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
715 if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
716 dbuf_rm_spill(dn, tx);
717 dnode_rm_spill(dn, tx);
718 }
719 rw_exit(&dn->dn_struct_rwlock);
720
721 /* change type */
722 dn->dn_type = ot;
723
724 /* change bonus size and type */
725 mutex_enter(&dn->dn_mtx);
726 dn->dn_bonustype = bonustype;
727 dn->dn_bonuslen = bonuslen;
728 dn->dn_num_slots = dn_slots;
729 dn->dn_nblkptr = nblkptr;
730 dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
731 dn->dn_compress = ZIO_COMPRESS_INHERIT;
732 ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
733
734 /* fix up the bonus db_size */
735 if (dn->dn_bonus) {
736 dn->dn_bonus->db.db_size =
737 DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
738 (dn->dn_nblkptr - 1) * sizeof (blkptr_t);
739 ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
740 }
741
742 dn->dn_allocated_txg = tx->tx_txg;
743 mutex_exit(&dn->dn_mtx);
744 }
745
746 #ifdef _KERNEL
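/*
 * Copy the contents of odn into the preallocated ndn and invalidate odn.
 * Called from dnode_move() with the handle zrlock and os_lock held, which
 * prevents new holds and dbufs from appearing while the copy is in progress.
 */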
747 static void
748 dnode_move_impl(dnode_t *odn, dnode_t *ndn)
749 {
750 int i;
751
752 ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
753 ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
754 ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
755 ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));
756
757 /* Copy fields. */
758 ndn->dn_objset = odn->dn_objset;
759 ndn->dn_object = odn->dn_object;
760 ndn->dn_dbuf = odn->dn_dbuf;
761 ndn->dn_handle = odn->dn_handle;
762 ndn->dn_phys = odn->dn_phys;
763 ndn->dn_type = odn->dn_type;
764 ndn->dn_bonuslen = odn->dn_bonuslen;
765 ndn->dn_bonustype = odn->dn_bonustype;
766 ndn->dn_nblkptr = odn->dn_nblkptr;
767 ndn->dn_checksum = odn->dn_checksum;
768 ndn->dn_compress = odn->dn_compress;
769 ndn->dn_nlevels = odn->dn_nlevels;
770 ndn->dn_indblkshift = odn->dn_indblkshift;
771 ndn->dn_datablkshift = odn->dn_datablkshift;
772 ndn->dn_datablkszsec = odn->dn_datablkszsec;
773 ndn->dn_datablksz = odn->dn_datablksz;
774 ndn->dn_maxblkid = odn->dn_maxblkid;
775 ndn->dn_num_slots = odn->dn_num_slots;
776 bcopy(&odn->dn_next_type[0], &ndn->dn_next_type[0],
777 sizeof (odn->dn_next_type));
778 bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
779 sizeof (odn->dn_next_nblkptr));
780 bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
781 sizeof (odn->dn_next_nlevels));
782 bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
783 sizeof (odn->dn_next_indblkshift));
784 bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
785 sizeof (odn->dn_next_bonustype));
786 bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
787 sizeof (odn->dn_rm_spillblk));
788 bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
789 sizeof (odn->dn_next_bonuslen));
790 bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
791 sizeof (odn->dn_next_blksz));
792 bcopy(&odn->dn_next_maxblkid[0], &ndn->dn_next_maxblkid[0],
793 sizeof (odn->dn_next_maxblkid));
794 for (i = 0; i < TXG_SIZE; i++) {
795 list_move_tail(&ndn->dn_dirty_records[i],
796 &odn->dn_dirty_records[i]);
797 }
798 bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
799 sizeof (odn->dn_free_ranges));
800 ndn->dn_allocated_txg = odn->dn_allocated_txg;
801 ndn->dn_free_txg = odn->dn_free_txg;
802 ndn->dn_assigned_txg = odn->dn_assigned_txg;
803 ndn->dn_dirty_txg = odn->dn_dirty_txg;
804 ndn->dn_dirtyctx = odn->dn_dirtyctx;
805 ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
806 ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
807 zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
808 ASSERT(avl_is_empty(&ndn->dn_dbufs));
809 avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
810 ndn->dn_dbufs_count = odn->dn_dbufs_count;
811 ndn->dn_bonus = odn->dn_bonus;
812 ndn->dn_have_spill = odn->dn_have_spill;
813 ndn->dn_zio = odn->dn_zio;
814 ndn->dn_oldused = odn->dn_oldused;
815 ndn->dn_oldflags = odn->dn_oldflags;
816 ndn->dn_olduid = odn->dn_olduid;
817 ndn->dn_oldgid = odn->dn_oldgid;
818 ndn->dn_oldprojid = odn->dn_oldprojid;
819 ndn->dn_newuid = odn->dn_newuid;
820 ndn->dn_newgid = odn->dn_newgid;
821 ndn->dn_newprojid = odn->dn_newprojid;
822 ndn->dn_id_flags = odn->dn_id_flags;
823 dmu_zfetch_init(&ndn->dn_zfetch, NULL);
824 list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
825 ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
826
827 /*
828 * Update back pointers. Updating the handle fixes the back pointer of
829 * every descendant dbuf as well as the bonus dbuf.
830 */
831 ASSERT(ndn->dn_handle->dnh_dnode == odn);
832 ndn->dn_handle->dnh_dnode = ndn;
833 if (ndn->dn_zfetch.zf_dnode == odn) {
834 ndn->dn_zfetch.zf_dnode = ndn;
835 }
836
837 /*
838 * Invalidate the original dnode by clearing all of its back pointers.
839 */
840 odn->dn_dbuf = NULL;
841 odn->dn_handle = NULL;
842 avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
843 offsetof(dmu_buf_impl_t, db_link));
844 odn->dn_dbufs_count = 0;
845 odn->dn_bonus = NULL;
846 odn->dn_zfetch.zf_dnode = NULL;
847
848 /*
849 * Set the low bit of the objset pointer to ensure that dnode_move()
850 * recognizes the dnode as invalid in any subsequent callback.
851 */
852 POINTER_INVALIDATE(&odn->dn_objset);
853
854 /*
855 * Satisfy the destructor.
856 */
857 for (i = 0; i < TXG_SIZE; i++) {
858 list_create(&odn->dn_dirty_records[i],
859 sizeof (dbuf_dirty_record_t),
860 offsetof(dbuf_dirty_record_t, dr_dirty_node));
861 odn->dn_free_ranges[i] = NULL;
862 odn->dn_next_nlevels[i] = 0;
863 odn->dn_next_indblkshift[i] = 0;
864 odn->dn_next_bonustype[i] = 0;
865 odn->dn_rm_spillblk[i] = 0;
866 odn->dn_next_bonuslen[i] = 0;
867 odn->dn_next_blksz[i] = 0;
868 }
869 odn->dn_allocated_txg = 0;
870 odn->dn_free_txg = 0;
871 odn->dn_assigned_txg = 0;
872 odn->dn_dirty_txg = 0;
873 odn->dn_dirtyctx = 0;
874 odn->dn_dirtyctx_firstset = NULL;
875 odn->dn_have_spill = B_FALSE;
876 odn->dn_zio = NULL;
877 odn->dn_oldused = 0;
878 odn->dn_oldflags = 0;
879 odn->dn_olduid = 0;
880 odn->dn_oldgid = 0;
881 odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
882 odn->dn_newuid = 0;
883 odn->dn_newgid = 0;
884 odn->dn_newprojid = ZFS_DEFAULT_PROJID;
885 odn->dn_id_flags = 0;
886
887 /*
888 * Mark the dnode.
889 */
890 ndn->dn_moved = 1;
891 odn->dn_moved = (uint8_t)-1;
892 }
893
894 /*ARGSUSED*/
895 static kmem_cbrc_t
896 dnode_move(void *buf, void *newbuf, size_t size, void *arg)
897 {
898 dnode_t *odn = buf, *ndn = newbuf;
899 objset_t *os;
900 int64_t refcount;
901 uint32_t dbufs;
902
903 /*
904 * The dnode is on the objset's list of known dnodes if the objset
905 * pointer is valid. We set the low bit of the objset pointer when
906 * freeing the dnode to invalidate it, and the memory patterns written
907 * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
908 * A newly created dnode sets the objset pointer last of all to indicate
909 * that the dnode is known and in a valid state to be moved by this
910 * function.
911 */
912 os = odn->dn_objset;
913 if (!POINTER_IS_VALID(os)) {
914 DNODE_STAT_BUMP(dnode_move_invalid);
915 return (KMEM_CBRC_DONT_KNOW);
916 }
917
918 /*
919 * Ensure that the objset does not go away during the move.
920 */
921 rw_enter(&os_lock, RW_WRITER);
922 if (os != odn->dn_objset) {
923 rw_exit(&os_lock);
924 DNODE_STAT_BUMP(dnode_move_recheck1);
925 return (KMEM_CBRC_DONT_KNOW);
926 }
927
928 /*
929 * If the dnode is still valid, then so is the objset. We know that no
930 * valid objset can be freed while we hold os_lock, so we can safely
931 * ensure that the objset remains in use.
932 */
933 mutex_enter(&os->os_lock);
934
935 /*
936 * Recheck the objset pointer in case the dnode was removed just before
937 * acquiring the lock.
938 */
939 if (os != odn->dn_objset) {
940 mutex_exit(&os->os_lock);
941 rw_exit(&os_lock);
942 DNODE_STAT_BUMP(dnode_move_recheck2);
943 return (KMEM_CBRC_DONT_KNOW);
944 }
945
946 /*
947 * At this point we know that as long as we hold os->os_lock, the dnode
948 * cannot be freed and fields within the dnode can be safely accessed.
949 * The objset listing this dnode cannot go away as long as this dnode is
950 * on its list.
951 */
952 rw_exit(&os_lock);
953 if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
954 mutex_exit(&os->os_lock);
955 DNODE_STAT_BUMP(dnode_move_special);
956 return (KMEM_CBRC_NO);
957 }
958 ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
959
960 /*
961 * Lock the dnode handle to prevent the dnode from obtaining any new
962 * holds. This also prevents the descendant dbufs and the bonus dbuf
963 * from accessing the dnode, so that we can discount their holds. The
964 * handle is safe to access because we know that while the dnode cannot
965 * go away, neither can its handle. Once we hold dnh_zrlock, we can
966 * safely move any dnode referenced only by dbufs.
967 */
968 if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
969 mutex_exit(&os->os_lock);
970 DNODE_STAT_BUMP(dnode_move_handle);
971 return (KMEM_CBRC_LATER);
972 }
973
974 /*
975 * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
976 * We need to guarantee that there is a hold for every dbuf in order to
977 * determine whether the dnode is actively referenced. Falsely matching
978 * a dbuf to an active hold would lead to an unsafe move. It's possible
979 * that a thread already having an active dnode hold is about to add a
980 * dbuf, and we can't compare hold and dbuf counts while the add is in
981 * progress.
982 */
983 if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
984 zrl_exit(&odn->dn_handle->dnh_zrlock);
985 mutex_exit(&os->os_lock);
986 DNODE_STAT_BUMP(dnode_move_rwlock);
987 return (KMEM_CBRC_LATER);
988 }
989
990 /*
991 * A dbuf may be removed (evicted) without an active dnode hold. In that
992 * case, the dbuf count is decremented under the handle lock before the
993 * dbuf's hold is released. This order ensures that if we count the hold
994 * after the dbuf is removed but before its hold is released, we will
995 * treat the unmatched hold as active and exit safely. If we count the
996 * hold before the dbuf is removed, the hold is discounted, and the
997 * removal is blocked until the move completes.
998 */
999 refcount = zfs_refcount_count(&odn->dn_holds);
1000 ASSERT(refcount >= 0);
1001 dbufs = odn->dn_dbufs_count;
1002
1003 /* We can't have more dbufs than dnode holds. */
1004 ASSERT3U(dbufs, <=, refcount);
1005 DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
1006 uint32_t, dbufs);
1007
1008 if (refcount > dbufs) {
1009 rw_exit(&odn->dn_struct_rwlock);
1010 zrl_exit(&odn->dn_handle->dnh_zrlock);
1011 mutex_exit(&os->os_lock);
1012 DNODE_STAT_BUMP(dnode_move_active);
1013 return (KMEM_CBRC_LATER);
1014 }
1015
1016 rw_exit(&odn->dn_struct_rwlock);
1017
1018 /*
1019 * At this point we know that anyone with a hold on the dnode is not
1020 * actively referencing it. The dnode is known and in a valid state to
1021 * move. We're holding the locks needed to execute the critical section.
1022 */
1023 dnode_move_impl(odn, ndn);
1024
1025 list_link_replace(&odn->dn_link, &ndn->dn_link);
1026 /* If the dnode was safe to move, the refcount cannot have changed. */
1027 ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
1028 ASSERT(dbufs == ndn->dn_dbufs_count);
1029 zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
1030 mutex_exit(&os->os_lock);
1031
1032 return (KMEM_CBRC_YES);
1033 }
1034 #endif /* _KERNEL */
1035
1036 static void
1037 dnode_slots_hold(dnode_children_t *children, int idx, int slots)
1038 {
1039 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1040
1041 for (int i = idx; i < idx + slots; i++) {
1042 dnode_handle_t *dnh = &children->dnc_children[i];
1043 zrl_add(&dnh->dnh_zrlock);
1044 }
1045 }
1046
1047 static void
1048 dnode_slots_rele(dnode_children_t *children, int idx, int slots)
1049 {
1050 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1051
1052 for (int i = idx; i < idx + slots; i++) {
1053 dnode_handle_t *dnh = &children->dnc_children[i];
1054
1055 if (zrl_is_locked(&dnh->dnh_zrlock))
1056 zrl_exit(&dnh->dnh_zrlock);
1057 else
1058 zrl_remove(&dnh->dnh_zrlock);
1059 }
1060 }
1061
1062 static void
1063 dnode_slots_enter(dnode_children_t *children, int idx, int slots,
1064 kstat_named_t *statp)
1065 {
1066 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1067
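	/*
	 * Lock every handle in [idx, idx + slots).  If any trylock fails,
	 * drop the locks already taken, bump the caller's retry kstat,
	 * yield, and start over to avoid deadlocking with another thread
	 * locking an overlapping range.
	 */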
1068 retry:
1069 for (int i = idx; i < idx + slots; i++) {
1070 dnode_handle_t *dnh = &children->dnc_children[i];
1071
1072 if (!zrl_tryenter(&dnh->dnh_zrlock)) {
1073 for (int j = idx; j < i; j++) {
1074 dnh = &children->dnc_children[j];
1075 zrl_exit(&dnh->dnh_zrlock);
1076 }
1077
1078 atomic_add_64(&statp->value.ui64, 1);
1079 kpreempt(KPREEMPT_SYNC);
1080 goto retry;
1081 }
1082 }
1083 }
1084
1085 static void
1086 dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
1087 {
1088 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1089
1090 for (int i = idx; i < idx + slots; i++) {
1091 dnode_handle_t *dnh = &children->dnc_children[i];
1092 dnh->dnh_dnode = ptr;
1093 }
1094 }
1095
1096 static boolean_t
1097 dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
1098 {
1099 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1100
1101 /*
1102 * If all dnode slots are either already free or
1103 	 * evictable, return B_TRUE.
1104 */
1105 for (int i = idx; i < idx + slots; i++) {
1106 dnode_handle_t *dnh = &children->dnc_children[i];
1107 dnode_t *dn = dnh->dnh_dnode;
1108
1109 if (dn == DN_SLOT_FREE) {
1110 continue;
1111 } else if (DN_SLOT_IS_PTR(dn)) {
1112 mutex_enter(&dn->dn_mtx);
1113 boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
1114 zfs_refcount_is_zero(&dn->dn_holds) &&
1115 !DNODE_IS_DIRTY(dn));
1116 mutex_exit(&dn->dn_mtx);
1117
1118 if (!can_free)
1119 return (B_FALSE);
1120 else
1121 continue;
1122 } else {
1123 return (B_FALSE);
1124 }
1125 }
1126
1127 return (B_TRUE);
1128 }
1129
1130 static void
1131 dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
1132 {
1133 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1134
1135 for (int i = idx; i < idx + slots; i++) {
1136 dnode_handle_t *dnh = &children->dnc_children[i];
1137
1138 ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
1139
1140 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1141 ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
1142 dnode_destroy(dnh->dnh_dnode);
1143 dnh->dnh_dnode = DN_SLOT_FREE;
1144 }
1145 }
1146 }
1147
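/*
 * Mark the interior (extra) slots consumed by a multi-slot dnode as free.
 * The first slot, which holds the dnode itself, is left untouched.
 */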
1148 void
1149 dnode_free_interior_slots(dnode_t *dn)
1150 {
1151 dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
1152 int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
1153 int idx = (dn->dn_object & (epb - 1)) + 1;
1154 int slots = dn->dn_num_slots - 1;
1155
1156 if (slots == 0)
1157 return;
1158
1159 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1160
1161 dnode_slots_enter(children, idx, slots,
1162 &dnode_stats.dnode_free_interior_lock_retry);
1163
1164 dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
1165 dnode_slots_rele(children, idx, slots);
1166 }
1167
1168 void
1169 dnode_special_close(dnode_handle_t *dnh)
1170 {
1171 dnode_t *dn = dnh->dnh_dnode;
1172
1173 /*
1174 * Ensure dnode_rele_and_unlock() has released dn_mtx, after final
1175 * zfs_refcount_remove()
1176 */
1177 mutex_enter(&dn->dn_mtx);
1178 if (zfs_refcount_count(&dn->dn_holds) > 0)
1179 cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
1180 mutex_exit(&dn->dn_mtx);
1181 ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
1182
1183 ASSERT(dn->dn_dbuf == NULL ||
1184 dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
1185 zrl_add(&dnh->dnh_zrlock);
1186 dnode_destroy(dn); /* implicit zrl_remove() */
1187 zrl_destroy(&dnh->dnh_zrlock);
1188 dnh->dnh_dnode = NULL;
1189 }
1190
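/*
 * Open a handle on one of the objset's special dnodes (e.g. the meta dnode
 * or the user/group/project accounting dnodes).  These are embedded in the
 * objset itself and have no containing dbuf (dn_dbuf == NULL).
 */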
1191 void
1192 dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
1193 dnode_handle_t *dnh)
1194 {
1195 dnode_t *dn;
1196
1197 zrl_init(&dnh->dnh_zrlock);
1198 VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
1199
1200 dn = dnode_create(os, dnp, NULL, object, dnh);
1201 DNODE_VERIFY(dn);
1202
1203 zrl_exit(&dnh->dnh_zrlock);
1204 }
1205
1206 static void
1207 dnode_buf_evict_async(void *dbu)
1208 {
1209 dnode_children_t *dnc = dbu;
1210
1211 DNODE_STAT_BUMP(dnode_buf_evict);
1212
1213 for (int i = 0; i < dnc->dnc_count; i++) {
1214 dnode_handle_t *dnh = &dnc->dnc_children[i];
1215 dnode_t *dn;
1216
1217 /*
1218 * The dnode handle lock guards against the dnode moving to
1219 * another valid address, so there is no need here to guard
1220 * against changes to or from NULL.
1221 */
1222 if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1223 zrl_destroy(&dnh->dnh_zrlock);
1224 dnh->dnh_dnode = DN_SLOT_UNINIT;
1225 continue;
1226 }
1227
1228 zrl_add(&dnh->dnh_zrlock);
1229 dn = dnh->dnh_dnode;
1230 /*
1231 * If there are holds on this dnode, then there should
1232 * be holds on the dnode's containing dbuf as well; thus
1233 * it wouldn't be eligible for eviction and this function
1234 * would not have been called.
1235 */
1236 ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
1237 ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
1238
1239 dnode_destroy(dn); /* implicit zrl_remove() for first slot */
1240 zrl_destroy(&dnh->dnh_zrlock);
1241 dnh->dnh_dnode = DN_SLOT_UNINIT;
1242 }
1243 kmem_free(dnc, sizeof (dnode_children_t) +
1244 dnc->dnc_count * sizeof (dnode_handle_t));
1245 }
1246
1247 /*
1248 * When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
1249 * to ensure the hole at the specified object offset is large enough to
1250 * hold the dnode being created. The slots parameter is also used to ensure
1251 * a dnode does not span multiple dnode blocks. In both of these cases, if
1252 * a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
1253 * are only possible when using DNODE_MUST_BE_FREE.
1254 *
1255 * If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
1256 * dnode_hold_impl() will check if the requested dnode is already consumed
1257  * as an extra dnode slot by a large dnode, in which case it returns
1258 * ENOENT.
1259 *
1260 * If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
1261  * return whether the hold would succeed or not. tag and dnp should be set to
1262 * NULL in this case.
1263 *
1264 * errors:
1265 * EINVAL - invalid object number or flags.
1266 * ENOSPC - hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
1267 * EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
1268 * - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
1269 * - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
1270 * ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
1271 * - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
1272  * EIO - i/o error when reading the meta dnode dbuf.
1273  * The hold itself succeeds even for free dnodes (DNODE_MUST_BE_FREE).
1274 */
1275 int
1276 dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
1277 void *tag, dnode_t **dnp)
1278 {
1279 int epb, idx, err;
1280 int drop_struct_lock = FALSE;
1281 int type;
1282 uint64_t blk;
1283 dnode_t *mdn, *dn;
1284 dmu_buf_impl_t *db;
1285 dnode_children_t *dnc;
1286 dnode_phys_t *dn_block;
1287 dnode_handle_t *dnh;
1288
1289 ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
1290 ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
1291 IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
1292
1293 /*
1294 * If you are holding the spa config lock as writer, you shouldn't
1295 * be asking the DMU to do *anything* unless it's the root pool
1296 * which may require us to read from the root filesystem while
1297 * holding some (not all) of the locks as writer.
1298 */
1299 ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
1300 (spa_is_root(os->os_spa) &&
1301 spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
1302
1303 ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
1304
1305 if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
1306 object == DMU_PROJECTUSED_OBJECT) {
1307 if (object == DMU_USERUSED_OBJECT)
1308 dn = DMU_USERUSED_DNODE(os);
1309 else if (object == DMU_GROUPUSED_OBJECT)
1310 dn = DMU_GROUPUSED_DNODE(os);
1311 else
1312 dn = DMU_PROJECTUSED_DNODE(os);
1313 if (dn == NULL)
1314 return (SET_ERROR(ENOENT));
1315 type = dn->dn_type;
1316 if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
1317 return (SET_ERROR(ENOENT));
1318 if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
1319 return (SET_ERROR(EEXIST));
1320 DNODE_VERIFY(dn);
1321 /* Don't actually hold if dry run, just return 0 */
1322 if (!(flag & DNODE_DRY_RUN)) {
1323 (void) zfs_refcount_add(&dn->dn_holds, tag);
1324 *dnp = dn;
1325 }
1326 return (0);
1327 }
1328
1329 if (object == 0 || object >= DN_MAX_OBJECT)
1330 return (SET_ERROR(EINVAL));
1331
1332 mdn = DMU_META_DNODE(os);
1333 ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
1334
1335 DNODE_VERIFY(mdn);
1336
1337 if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
1338 rw_enter(&mdn->dn_struct_rwlock, RW_READER);
1339 drop_struct_lock = TRUE;
1340 }
1341
1342 blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
1343 db = dbuf_hold(mdn, blk, FTAG);
1344 if (drop_struct_lock)
1345 rw_exit(&mdn->dn_struct_rwlock);
1346 if (db == NULL) {
1347 DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
1348 return (SET_ERROR(EIO));
1349 }
1350 /*
1351 * We do not need to decrypt to read the dnode so it doesn't matter
1352 * if we get the encrypted or decrypted version.
1353 */
1354 err = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_NO_DECRYPT);
1355 if (err) {
1356 DNODE_STAT_BUMP(dnode_hold_dbuf_read);
1357 dbuf_rele(db, FTAG);
1358 return (err);
1359 }
1360
1361 ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
1362 epb = db->db.db_size >> DNODE_SHIFT;
1363
1364 idx = object & (epb - 1);
1365 dn_block = (dnode_phys_t *)db->db.db_data;
1366
1367 ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
1368 dnc = dmu_buf_get_user(&db->db);
1369 dnh = NULL;
1370 if (dnc == NULL) {
1371 dnode_children_t *winner;
1372 int skip = 0;
1373
1374 dnc = kmem_zalloc(sizeof (dnode_children_t) +
1375 epb * sizeof (dnode_handle_t), KM_SLEEP);
1376 dnc->dnc_count = epb;
1377 dnh = &dnc->dnc_children[0];
1378
1379 /* Initialize dnode slot status from dnode_phys_t */
1380 for (int i = 0; i < epb; i++) {
1381 zrl_init(&dnh[i].dnh_zrlock);
1382
1383 if (skip) {
1384 skip--;
1385 continue;
1386 }
1387
1388 if (dn_block[i].dn_type != DMU_OT_NONE) {
1389 int interior = dn_block[i].dn_extra_slots;
1390
1391 dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
1392 dnode_set_slots(dnc, i + 1, interior,
1393 DN_SLOT_INTERIOR);
1394 skip = interior;
1395 } else {
1396 dnh[i].dnh_dnode = DN_SLOT_FREE;
1397 skip = 0;
1398 }
1399 }
1400
1401 dmu_buf_init_user(&dnc->dnc_dbu, NULL,
1402 dnode_buf_evict_async, NULL);
1403 winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
1404 if (winner != NULL) {
1405
1406 for (int i = 0; i < epb; i++)
1407 zrl_destroy(&dnh[i].dnh_zrlock);
1408
1409 kmem_free(dnc, sizeof (dnode_children_t) +
1410 epb * sizeof (dnode_handle_t));
1411 dnc = winner;
1412 }
1413 }
1414
1415 ASSERT(dnc->dnc_count == epb);
1416
1417 if (flag & DNODE_MUST_BE_ALLOCATED) {
1418 slots = 1;
1419
1420 dnode_slots_hold(dnc, idx, slots);
1421 dnh = &dnc->dnc_children[idx];
1422
1423 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1424 dn = dnh->dnh_dnode;
1425 } else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
1426 DNODE_STAT_BUMP(dnode_hold_alloc_interior);
1427 dnode_slots_rele(dnc, idx, slots);
1428 dbuf_rele(db, FTAG);
1429 return (SET_ERROR(EEXIST));
1430 } else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
1431 DNODE_STAT_BUMP(dnode_hold_alloc_misses);
1432 dnode_slots_rele(dnc, idx, slots);
1433 dbuf_rele(db, FTAG);
1434 return (SET_ERROR(ENOENT));
1435 } else {
1436 dnode_slots_rele(dnc, idx, slots);
1437 dnode_slots_enter(dnc, idx, slots,
1438 &dnode_stats.dnode_hold_alloc_lock_retry);
1439
1440 /*
1441 * Someone else won the race and called dnode_create()
1442 * after we checked DN_SLOT_IS_PTR() above but before
1443 * we acquired the lock.
1444 */
1445 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1446 DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
1447 dn = dnh->dnh_dnode;
1448 } else {
1449 dn = dnode_create(os, dn_block + idx, db,
1450 object, dnh);
1451 }
1452 }
1453
1454 mutex_enter(&dn->dn_mtx);
1455 if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
1456 DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
1457 mutex_exit(&dn->dn_mtx);
1458 dnode_slots_rele(dnc, idx, slots);
1459 dbuf_rele(db, FTAG);
1460 return (SET_ERROR(ENOENT));
1461 }
1462
1463 /* Don't actually hold if dry run, just return 0 */
1464 if (flag & DNODE_DRY_RUN) {
1465 mutex_exit(&dn->dn_mtx);
1466 dnode_slots_rele(dnc, idx, slots);
1467 dbuf_rele(db, FTAG);
1468 return (0);
1469 }
1470
1471 DNODE_STAT_BUMP(dnode_hold_alloc_hits);
1472 } else if (flag & DNODE_MUST_BE_FREE) {
1473
1474 if (idx + slots - 1 >= DNODES_PER_BLOCK) {
1475 DNODE_STAT_BUMP(dnode_hold_free_overflow);
1476 dbuf_rele(db, FTAG);
1477 return (SET_ERROR(ENOSPC));
1478 }
1479
1480 dnode_slots_hold(dnc, idx, slots);
1481
1482 if (!dnode_check_slots_free(dnc, idx, slots)) {
1483 DNODE_STAT_BUMP(dnode_hold_free_misses);
1484 dnode_slots_rele(dnc, idx, slots);
1485 dbuf_rele(db, FTAG);
1486 return (SET_ERROR(ENOSPC));
1487 }
1488
1489 dnode_slots_rele(dnc, idx, slots);
1490 dnode_slots_enter(dnc, idx, slots,
1491 &dnode_stats.dnode_hold_free_lock_retry);
1492
1493 if (!dnode_check_slots_free(dnc, idx, slots)) {
1494 DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
1495 dnode_slots_rele(dnc, idx, slots);
1496 dbuf_rele(db, FTAG);
1497 return (SET_ERROR(ENOSPC));
1498 }
1499
1500 /*
1501 * Allocated but otherwise free dnodes which would
1502 	 * be in the interior of a multi-slot dnode need
1503 * to be freed. Single slot dnodes can be safely
1504 * re-purposed as a performance optimization.
1505 */
1506 if (slots > 1)
1507 dnode_reclaim_slots(dnc, idx + 1, slots - 1);
1508
1509 dnh = &dnc->dnc_children[idx];
1510 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1511 dn = dnh->dnh_dnode;
1512 } else {
1513 dn = dnode_create(os, dn_block + idx, db,
1514 object, dnh);
1515 }
1516
1517 mutex_enter(&dn->dn_mtx);
1518 if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
1519 DNODE_STAT_BUMP(dnode_hold_free_refcount);
1520 mutex_exit(&dn->dn_mtx);
1521 dnode_slots_rele(dnc, idx, slots);
1522 dbuf_rele(db, FTAG);
1523 return (SET_ERROR(EEXIST));
1524 }
1525
1526 /* Don't actually hold if dry run, just return 0 */
1527 if (flag & DNODE_DRY_RUN) {
1528 mutex_exit(&dn->dn_mtx);
1529 dnode_slots_rele(dnc, idx, slots);
1530 dbuf_rele(db, FTAG);
1531 return (0);
1532 }
1533
1534 dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
1535 DNODE_STAT_BUMP(dnode_hold_free_hits);
1536 } else {
1537 dbuf_rele(db, FTAG);
1538 return (SET_ERROR(EINVAL));
1539 }
1540
1541 ASSERT0(dn->dn_free_txg);
1542
1543 if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
1544 dbuf_add_ref(db, dnh);
1545
1546 mutex_exit(&dn->dn_mtx);
1547
1548 /* Now we can rely on the hold to prevent the dnode from moving. */
1549 dnode_slots_rele(dnc, idx, slots);
1550
1551 DNODE_VERIFY(dn);
1552 ASSERT3P(dn->dn_dbuf, ==, db);
1553 ASSERT3U(dn->dn_object, ==, object);
1554 dbuf_rele(db, FTAG);
1555
1556 *dnp = dn;
1557 return (0);
1558 }
1559
1560 /*
1561 * Return held dnode if the object is allocated, NULL if not.
1562 */
1563 int
1564 dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
1565 {
1566 return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
1567 dnp));
1568 }
1569
1570 /*
1571 * Can only add a reference if there is already at least one
1572 * reference on the dnode. Returns FALSE if unable to add a
1573 * new reference.
1574 */
1575 boolean_t
1576 dnode_add_ref(dnode_t *dn, void *tag)
1577 {
1578 mutex_enter(&dn->dn_mtx);
1579 if (zfs_refcount_is_zero(&dn->dn_holds)) {
1580 mutex_exit(&dn->dn_mtx);
1581 return (FALSE);
1582 }
1583 VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
1584 mutex_exit(&dn->dn_mtx);
1585 return (TRUE);
1586 }
1587
1588 void
1589 dnode_rele(dnode_t *dn, void *tag)
1590 {
1591 mutex_enter(&dn->dn_mtx);
1592 dnode_rele_and_unlock(dn, tag, B_FALSE);
1593 }
1594
1595 void
1596 dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
1597 {
1598 uint64_t refs;
1599 /* Get while the hold prevents the dnode from moving. */
1600 dmu_buf_impl_t *db = dn->dn_dbuf;
1601 dnode_handle_t *dnh = dn->dn_handle;
1602
1603 refs = zfs_refcount_remove(&dn->dn_holds, tag);
1604 if (refs == 0)
1605 cv_broadcast(&dn->dn_nodnholds);
1606 mutex_exit(&dn->dn_mtx);
1607 /* dnode could get destroyed at this point, so don't use it anymore */
1608
1609 /*
1610 * It's unsafe to release the last hold on a dnode by dnode_rele() or
1611 * indirectly by dbuf_rele() while relying on the dnode handle to
1612 * prevent the dnode from moving, since releasing the last hold could
1613 * result in the dnode's parent dbuf evicting its dnode handles. For
1614 * that reason anyone calling dnode_rele() or dbuf_rele() without some
1615 * other direct or indirect hold on the dnode must first drop the dnode
1616 * handle.
1617 */
1618 ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
1619
1620 /* NOTE: the DNODE_DNODE does not have a dn_dbuf */
1621 if (refs == 0 && db != NULL) {
1622 /*
1623 * Another thread could add a hold to the dnode handle in
1624 * dnode_hold_impl() while holding the parent dbuf. Since the
1625 * hold on the parent dbuf prevents the handle from being
1626 * destroyed, the hold on the handle is OK. We can't yet assert
1627 * that the handle has zero references, but that will be
1628 * asserted anyway when the handle gets destroyed.
1629 */
1630 mutex_enter(&db->db_mtx);
1631 dbuf_rele_and_unlock(db, dnh, evicting);
1632 }
1633 }
1634
1635 /*
1636 * Test whether we can create a dnode at the specified location.
1637 */
1638 int
1639 dnode_try_claim(objset_t *os, uint64_t object, int slots)
1640 {
1641 return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
1642 slots, NULL, NULL));
1643 }
1644
1645 void
1646 dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
1647 {
1648 objset_t *os = dn->dn_objset;
1649 uint64_t txg = tx->tx_txg;
1650
1651 if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
1652 dsl_dataset_dirty(os->os_dsl_dataset, tx);
1653 return;
1654 }
1655
1656 DNODE_VERIFY(dn);
1657
1658 #ifdef ZFS_DEBUG
1659 mutex_enter(&dn->dn_mtx);
1660 ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
1661 ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
1662 mutex_exit(&dn->dn_mtx);
1663 #endif
1664
1665 /*
1666 * Determine old uid/gid when necessary
1667 */
1668 dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
1669
1670 multilist_t *dirtylist = os->os_dirty_dnodes[txg & TXG_MASK];
1671 multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
1672
1673 /*
1674 * If we are already marked dirty, we're done.
1675 */
1676 if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
1677 multilist_sublist_unlock(mls);
1678 return;
1679 }
1680
1681 ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
1682 !avl_is_empty(&dn->dn_dbufs));
1683 ASSERT(dn->dn_datablksz != 0);
1684 ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
1685 ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
1686 ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);
1687
1688 dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
1689 dn->dn_object, txg);
1690
1691 multilist_sublist_insert_head(mls, dn);
1692
1693 multilist_sublist_unlock(mls);
1694
1695 /*
1696 * The dnode maintains a hold on its containing dbuf as
1697 * long as there are holds on it. Each instantiated child
1698 * dbuf maintains a hold on the dnode. When the last child
1699 * drops its hold, the dnode will drop its hold on the
1700 * containing dbuf. We add a "dirty hold" here so that the
1701 * dnode will hang around after we finish processing its
1702 * children.
1703 */
1704 VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
1705
1706 (void) dbuf_dirty(dn->dn_dbuf, tx);
1707
1708 dsl_dataset_dirty(os->os_dsl_dataset, tx);
1709 }
1710
1711 void
1712 dnode_free(dnode_t *dn, dmu_tx_t *tx)
1713 {
1714 mutex_enter(&dn->dn_mtx);
1715 if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1716 mutex_exit(&dn->dn_mtx);
1717 return;
1718 }
1719 dn->dn_free_txg = tx->tx_txg;
1720 mutex_exit(&dn->dn_mtx);
1721
1722 dnode_setdirty(dn, tx);
1723 }
1724
1725 /*
1726 * Try to change the block size for the indicated dnode. This can only
1727 * succeed if there are no blocks allocated or dirty beyond the first block.
1728 */
1729 int
1730 dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
1731 {
1732 dmu_buf_impl_t *db;
1733 int err;
1734
1735 ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
1736 if (size == 0)
1737 size = SPA_MINBLOCKSIZE;
1738 else
1739 size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
1740
1741 if (ibs == dn->dn_indblkshift)
1742 ibs = 0;
1743
1744 if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
1745 return (0);
1746
1747 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1748
1749 /* Check for any allocated blocks beyond the first */
1750 if (dn->dn_maxblkid != 0)
1751 goto fail;
1752
1753 mutex_enter(&dn->dn_dbufs_mtx);
1754 for (db = avl_first(&dn->dn_dbufs); db != NULL;
1755 db = AVL_NEXT(&dn->dn_dbufs, db)) {
1756 if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
1757 db->db_blkid != DMU_SPILL_BLKID) {
1758 mutex_exit(&dn->dn_dbufs_mtx);
1759 goto fail;
1760 }
1761 }
1762 mutex_exit(&dn->dn_dbufs_mtx);
1763
1764 if (ibs && dn->dn_nlevels != 1)
1765 goto fail;
1766
1767 /* resize the old block */
1768 err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
1769 if (err == 0) {
1770 dbuf_new_size(db, size, tx);
1771 } else if (err != ENOENT) {
1772 goto fail;
1773 }
1774
1775 dnode_setdblksz(dn, size);
1776 dnode_setdirty(dn, tx);
1777 dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
1778 if (ibs) {
1779 dn->dn_indblkshift = ibs;
1780 dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
1781 }
1782 /* rele after we have fixed the blocksize in the dnode */
1783 if (db)
1784 dbuf_rele(db, FTAG);
1785
1786 rw_exit(&dn->dn_struct_rwlock);
1787 return (0);
1788
1789 fail:
1790 rw_exit(&dn->dn_struct_rwlock);
1791 return (SET_ERROR(ENOTSUP));
1792 }
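
/*
 * Usage sketch (hypothetical wrapper in the spirit of a dmu-level caller):
 * hold the dnode, attempt the resize inside an assigned tx, and treat
 * ENOTSUP as "the object already has data beyond its first block".
 */
#if 0
static int
example_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}
#endif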
1793
1794 static void
1795 dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
1796 {
1797 uint64_t txgoff = tx->tx_txg & TXG_MASK;
1798 int old_nlevels = dn->dn_nlevels;
1799 dmu_buf_impl_t *db;
1800 list_t *list;
1801 dbuf_dirty_record_t *new, *dr, *dr_next;
1802
1803 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1804
1805 dn->dn_nlevels = new_nlevels;
1806
1807 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1808 dn->dn_next_nlevels[txgoff] = new_nlevels;
1809
1810 /* dirty the left indirects */
1811 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1812 ASSERT(db != NULL);
1813 new = dbuf_dirty(db, tx);
1814 dbuf_rele(db, FTAG);
1815
1816 /* transfer the dirty records to the new indirect */
1817 mutex_enter(&dn->dn_mtx);
1818 mutex_enter(&new->dt.di.dr_mtx);
1819 list = &dn->dn_dirty_records[txgoff];
1820 for (dr = list_head(list); dr; dr = dr_next) {
1821 dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
1822 if (dr->dr_dbuf->db_level != new_nlevels-1 &&
1823 dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
1824 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
1825 ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
1826 list_remove(&dn->dn_dirty_records[txgoff], dr);
1827 list_insert_tail(&new->dt.di.dr_children, dr);
1828 dr->dr_parent = new;
1829 }
1830 }
1831 mutex_exit(&new->dt.di.dr_mtx);
1832 mutex_exit(&dn->dn_mtx);
1833 }
1834
1835 int
1836 dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
1837 {
1838 int ret = 0;
1839
1840 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1841
1842 if (dn->dn_nlevels == nlevels) {
1843 ret = 0;
1844 goto out;
1845 } else if (nlevels < dn->dn_nlevels) {
1846 ret = SET_ERROR(EINVAL);
1847 goto out;
1848 }
1849
1850 dnode_set_nlevels_impl(dn, nlevels, tx);
1851
1852 out:
1853 rw_exit(&dn->dn_struct_rwlock);
1854 return (ret);
1855 }
1856
1857 /* read-holding callers must not rely on the lock being continuously held */
1858 void
1859 dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
1860 boolean_t force)
1861 {
1862 int epbs, new_nlevels;
1863 uint64_t sz;
1864
1865 ASSERT(blkid != DMU_BONUS_BLKID);
1866
1867 ASSERT(have_read ?
1868 RW_READ_HELD(&dn->dn_struct_rwlock) :
1869 RW_WRITE_HELD(&dn->dn_struct_rwlock));
1870
1871 /*
1872 * if we have a read-lock, check to see if we need to do any work
1873 * before upgrading to a write-lock.
1874 */
1875 if (have_read) {
1876 if (blkid <= dn->dn_maxblkid)
1877 return;
1878
1879 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
1880 rw_exit(&dn->dn_struct_rwlock);
1881 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1882 }
1883 }
1884
1885 /*
1886 * Raw sends (indicated by the force flag) require that we take the
1887 * given blkid even if the value is lower than the current value.
1888 */
1889 if (!force && blkid <= dn->dn_maxblkid)
1890 goto out;
1891
1892 /*
1893 * We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
1894 * to indicate that this field is set. This allows us to set the
1895 * maxblkid to 0 on an existing object in dnode_sync().
1896 */
1897 dn->dn_maxblkid = blkid;
1898 dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
1899 blkid | DMU_NEXT_MAXBLKID_SET;
1900
1901 /*
1902 * Compute the number of levels necessary to support the new maxblkid.
1903 * Raw sends will ensure nlevels is set correctly for us.
1904 */
1905 new_nlevels = 1;
1906 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1907 for (sz = dn->dn_nblkptr;
1908 sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1909 new_nlevels++;
1910
1911 if (!force) {
1912 if (new_nlevels > dn->dn_nlevels)
1913 dnode_set_nlevels_impl(dn, new_nlevels, tx);
1914 } else {
1915 ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
1916 }
1917
1918 out:
1919 if (have_read)
1920 rw_downgrade(&dn->dn_struct_rwlock);
1921 }
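
/*
 * The locking above is a common pattern: check cheaply under the read lock,
 * try to upgrade in place, and if that fails drop and re-acquire as writer,
 * then re-check because the state may have changed while the lock was
 * released.  A minimal sketch of the same idiom (hypothetical condition and
 * update; entered and exited with the lock held as reader):
 */
#if 0
static void
example_upgrade_pattern(krwlock_t *lock, uint64_t *val, uint64_t newval)
{
	if (newval <= *val)
		return;
	if (!rw_tryupgrade(lock)) {
		rw_exit(lock);
		rw_enter(lock, RW_WRITER);
	}
	if (newval > *val)		/* re-check under the write lock */
		*val = newval;
	rw_downgrade(lock);
}
#endif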
1922
1923 static void
1924 dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
1925 {
1926 dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
1927 if (db != NULL) {
1928 dmu_buf_will_dirty(&db->db, tx);
1929 dbuf_rele(db, FTAG);
1930 }
1931 }
1932
1933 /*
1934 * Dirty all the in-core level-1 dbufs in the range specified by start_blkid
1935 * and end_blkid.
1936 */
1937 static void
1938 dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1939 dmu_tx_t *tx)
1940 {
1941 dmu_buf_impl_t db_search;
1942 dmu_buf_impl_t *db;
1943 avl_index_t where;
1944
1945 mutex_enter(&dn->dn_dbufs_mtx);
1946
1947 db_search.db_level = 1;
1948 db_search.db_blkid = start_blkid + 1;
1949 db_search.db_state = DB_SEARCH;
1950 for (;;) {
1951
1952 db = avl_find(&dn->dn_dbufs, &db_search, &where);
1953 if (db == NULL)
1954 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1955
1956 if (db == NULL || db->db_level != 1 ||
1957 db->db_blkid >= end_blkid) {
1958 break;
1959 }
1960
1961 /*
1962 * Set up the next blkid we want to search for.
1963 */
1964 db_search.db_blkid = db->db_blkid + 1;
1965 ASSERT3U(db->db_blkid, >=, start_blkid);
1966
1967 /*
1968 * If the dbuf transitions to DB_EVICTING while we're trying
1969 * to dirty it, then we will be unable to discover it in
1970 * the dbuf hash table. This will result in a call to
1971 * dbuf_create() which needs to acquire the dn_dbufs_mtx
1972 * lock. To avoid a deadlock, we drop the lock before
1973 * dirtying the level-1 dbuf.
1974 */
1975 mutex_exit(&dn->dn_dbufs_mtx);
1976 dnode_dirty_l1(dn, db->db_blkid, tx);
1977 mutex_enter(&dn->dn_dbufs_mtx);
1978 }
1979
1980 #ifdef ZFS_DEBUG
1981 /*
1982 * Walk all the in-core level-1 dbufs and verify they have been dirtied.
1983 */
1984 db_search.db_level = 1;
1985 db_search.db_blkid = start_blkid + 1;
1986 db_search.db_state = DB_SEARCH;
1987 db = avl_find(&dn->dn_dbufs, &db_search, &where);
1988 if (db == NULL)
1989 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1990 for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
1991 if (db->db_level != 1 || db->db_blkid >= end_blkid)
1992 break;
1993 ASSERT(db->db_dirtycnt > 0);
1994 }
1995 #endif
1996 mutex_exit(&dn->dn_dbufs_mtx);
1997 }
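
/*
 * The loop above restarts its AVL lookup on every iteration because
 * dn_dbufs_mtx is dropped while dirtying, so a previously found dbuf
 * pointer cannot be trusted afterwards.  A minimal sketch of the
 * DB_SEARCH-sentinel lookup used to resume the walk (hypothetical helper):
 */
#if 0
static dmu_buf_impl_t *
example_find_l1_at_or_after(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t db_search, *db;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&dn->dn_dbufs_mtx));

	db_search.db_level = 1;
	db_search.db_blkid = blkid;
	db_search.db_state = DB_SEARCH;

	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	if (db == NULL)
		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
	return (db);	/* may be NULL, or a dbuf at a different level */
}
#endif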
1998
1999 void
2000 dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
2001 {
2002 dmu_buf_impl_t *db;
2003 uint64_t blkoff, blkid, nblks;
2004 int blksz, blkshift, head, tail;
2005 int trunc = FALSE;
2006 int epbs;
2007
2008 blksz = dn->dn_datablksz;
2009 blkshift = dn->dn_datablkshift;
2010 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2011
2012 if (len == DMU_OBJECT_END) {
2013 len = UINT64_MAX - off;
2014 trunc = TRUE;
2015 }
2016
2017 /*
2018 * First, block-align the region to free:
2019 */
2020 if (ISP2(blksz)) {
2021 head = P2NPHASE(off, blksz);
2022 blkoff = P2PHASE(off, blksz);
2023 if ((off >> blkshift) > dn->dn_maxblkid)
2024 return;
2025 } else {
2026 ASSERT(dn->dn_maxblkid == 0);
2027 if (off == 0 && len >= blksz) {
2028 /*
2029 * Freeing the whole block; fast-track this request.
2030 */
2031 blkid = 0;
2032 nblks = 1;
2033 if (dn->dn_nlevels > 1) {
2034 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2035 dnode_dirty_l1(dn, 0, tx);
2036 rw_exit(&dn->dn_struct_rwlock);
2037 }
2038 goto done;
2039 } else if (off >= blksz) {
2040 /* Freeing past end-of-data */
2041 return;
2042 } else {
2043 /* Freeing part of the block. */
2044 head = blksz - off;
2045 ASSERT3U(head, >, 0);
2046 }
2047 blkoff = off;
2048 }
2049 /* zero out any partial block data at the start of the range */
2050 if (head) {
2051 int res;
2052 ASSERT3U(blkoff + head, ==, blksz);
2053 if (len < head)
2054 head = len;
2055 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2056 res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
2057 TRUE, FALSE, FTAG, &db);
2058 rw_exit(&dn->dn_struct_rwlock);
2059 if (res == 0) {
2060 caddr_t data;
2061 boolean_t dirty;
2062
2063 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER,
2064 FTAG);
2065 /* don't dirty if it isn't on disk and isn't dirty */
2066 dirty = db->db_last_dirty ||
2067 (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
2068 dmu_buf_unlock_parent(db, dblt, FTAG);
2069 if (dirty) {
2070 dmu_buf_will_dirty(&db->db, tx);
2071 data = db->db.db_data;
2072 bzero(data + blkoff, head);
2073 }
2074 dbuf_rele(db, FTAG);
2075 }
2076 off += head;
2077 len -= head;
2078 }
2079
2080 /* If the range was less than one block, we're done */
2081 if (len == 0)
2082 return;
2083
2084 /* If the remaining range is past end of file, we're done */
2085 if ((off >> blkshift) > dn->dn_maxblkid)
2086 return;
2087
2088 ASSERT(ISP2(blksz));
2089 if (trunc)
2090 tail = 0;
2091 else
2092 tail = P2PHASE(len, blksz);
2093
2094 ASSERT0(P2PHASE(off, blksz));
2095 /* zero out any partial block data at the end of the range */
2096 if (tail) {
2097 int res;
2098 if (len < tail)
2099 tail = len;
2100 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2101 res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
2102 TRUE, FALSE, FTAG, &db);
2103 rw_exit(&dn->dn_struct_rwlock);
2104 if (res == 0) {
2105 boolean_t dirty;
2106 /* don't dirty if not on disk and not dirty */
2107 db_lock_type_t type = dmu_buf_lock_parent(db, RW_READER,
2108 FTAG);
2109 dirty = db->db_last_dirty ||
2110 (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
2111 dmu_buf_unlock_parent(db, type, FTAG);
2112 if (dirty) {
2113 dmu_buf_will_dirty(&db->db, tx);
2114 bzero(db->db.db_data, tail);
2115 }
2116 dbuf_rele(db, FTAG);
2117 }
2118 len -= tail;
2119 }
2120
2121 /* If the range did not include a full block, we are done */
2122 if (len == 0)
2123 return;
2124
2125 ASSERT(IS_P2ALIGNED(off, blksz));
2126 ASSERT(trunc || IS_P2ALIGNED(len, blksz));
2127 blkid = off >> blkshift;
2128 nblks = len >> blkshift;
2129 if (trunc)
2130 nblks += 1;
2131
2132 /*
2133 * Dirty all the indirect blocks in this range. Note that only
2134 * the first and last indirect blocks can actually be written
2135 * (if they were partially freed) -- they must be dirtied, even if
2136 * they do not exist on disk yet. The interior blocks will
2137 * be freed by free_children(), so they will not actually be written.
2138 * Even though these interior blocks will not be written, we
2139 * dirty them for two reasons:
2140 *
2141 * - It ensures that the indirect blocks remain in memory until
2142 * syncing context. (They have already been prefetched by
2143 * dmu_tx_hold_free(), so we don't have to worry about reading
2144 * them serially here.)
2145 *
2146 * - The dirty space accounting will put pressure on the txg sync
2147 * mechanism to begin syncing, and to delay transactions if there
2148 * is a large amount of freeing. Even though these indirect
2149 * blocks will not be written, we could need to write the same
2150 * amount of space if we copy the freed BPs into deadlists.
2151 */
2152 if (dn->dn_nlevels > 1) {
2153 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2154 uint64_t first, last;
2155
2156 first = blkid >> epbs;
2157 dnode_dirty_l1(dn, first, tx);
2158 if (trunc)
2159 last = dn->dn_maxblkid >> epbs;
2160 else
2161 last = (blkid + nblks - 1) >> epbs;
2162 if (last != first)
2163 dnode_dirty_l1(dn, last, tx);
2164
2165 dnode_dirty_l1range(dn, first, last, tx);
2166
2167 int shift = dn->dn_datablkshift + dn->dn_indblkshift -
2168 SPA_BLKPTRSHIFT;
2169 for (uint64_t i = first + 1; i < last; i++) {
2170 /*
2171 * Set i to the blockid of the next non-hole
2172 * level-1 indirect block at or after i. Note
2173 * that dnode_next_offset() operates in terms of
2174 * level-0-equivalent bytes.
2175 */
2176 uint64_t ibyte = i << shift;
2177 int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
2178 &ibyte, 2, 1, 0);
2179 i = ibyte >> shift;
2180 if (i >= last)
2181 break;
2182
2183 /*
2184 * Normally we should not see an error, either
2185 * from dnode_next_offset() or dbuf_hold_level()
2186 * (except for ESRCH from dnode_next_offset).
2187 * If there is an i/o error, then when we read
2188 * this block in syncing context, it will use
2189 * ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
2190 * to the "failmode" property. dnode_next_offset()
2191 * doesn't have a flag to indicate MUSTSUCCEED.
2192 */
2193 if (err != 0)
2194 break;
2195
2196 dnode_dirty_l1(dn, i, tx);
2197 }
2198 rw_exit(&dn->dn_struct_rwlock);
2199 }
2200
2201 done:
2202 /*
2203 * Add this range to the dnode range list.
2204 * We will finish up this free operation in the syncing phase.
2205 */
2206 mutex_enter(&dn->dn_mtx);
2207 int txgoff = tx->tx_txg & TXG_MASK;
2208 if (dn->dn_free_ranges[txgoff] == NULL) {
2209 dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
2210 RANGE_SEG64, NULL, 0, 0);
2211 }
2212 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
2213 range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
2214 dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
2215 blkid, nblks, tx->tx_txg);
2216 mutex_exit(&dn->dn_mtx);
2217
2218 dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
2219 dnode_setdirty(dn, tx);
2220 }
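
/*
 * A small sketch of the head/tail alignment arithmetic used above (the
 * helper is hypothetical): for a power-of-two block size,
 * P2PHASE(off, blksz) is the offset within the first block and
 * P2NPHASE(off, blksz) is the number of bytes up to the next block
 * boundary (the "head"); the "tail" is whatever partial block remains
 * after the head and all full blocks have been accounted for.
 */
#if 0
static void
example_block_align(uint64_t off, uint64_t len, uint64_t blksz,
    uint64_t *headp, uint64_t *tailp)
{
	uint64_t head, tail;

	ASSERT(ISP2(blksz));
	head = P2NPHASE(off, blksz);	/* partial block at the start */
	if (head >= len) {
		head = len;		/* range ends inside the first block */
		tail = 0;
	} else {
		tail = P2PHASE(len - head, blksz);	/* partial last block */
	}
	*headp = head;
	*tailp = tail;
}
#endif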
2221
2222 static boolean_t
2223 dnode_spill_freed(dnode_t *dn)
2224 {
2225 int i;
2226
2227 mutex_enter(&dn->dn_mtx);
2228 for (i = 0; i < TXG_SIZE; i++) {
2229 if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
2230 break;
2231 }
2232 mutex_exit(&dn->dn_mtx);
2233 return (i < TXG_SIZE);
2234 }
2235
2236 /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
2237 uint64_t
2238 dnode_block_freed(dnode_t *dn, uint64_t blkid)
2239 {
2240 void *dp = spa_get_dsl(dn->dn_objset->os_spa);
2241 int i;
2242
2243 if (blkid == DMU_BONUS_BLKID)
2244 return (FALSE);
2245
2246 /*
2247 * If we're in the process of opening the pool, dp will not be
2248 * set yet, but there shouldn't be anything dirty.
2249 */
2250 if (dp == NULL)
2251 return (FALSE);
2252
2253 if (dn->dn_free_txg)
2254 return (TRUE);
2255
2256 if (blkid == DMU_SPILL_BLKID)
2257 return (dnode_spill_freed(dn));
2258
2259 mutex_enter(&dn->dn_mtx);
2260 for (i = 0; i < TXG_SIZE; i++) {
2261 if (dn->dn_free_ranges[i] != NULL &&
2262 range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
2263 break;
2264 }
2265 mutex_exit(&dn->dn_mtx);
2266 return (i < TXG_SIZE);
2267 }
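
/*
 * Usage sketch (hypothetical caller): dnode_block_freed() answers "was this
 * blkid freed in a txg that has not finished syncing?", which lets a reader
 * treat the block as a hole rather than fetching soon-to-be-freed data.
 */
#if 0
static boolean_t
example_should_read_block(dnode_t *dn, uint64_t blkid)
{
	return (!dnode_block_freed(dn, blkid));
}
#endif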
2268
2269 /* call from syncing context when we actually write/free space for this dnode */
2270 void
2271 dnode_diduse_space(dnode_t *dn, int64_t delta)
2272 {
2273 uint64_t space;
2274 dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
2275 dn, dn->dn_phys,
2276 (u_longlong_t)dn->dn_phys->dn_used,
2277 (longlong_t)delta);
2278
2279 mutex_enter(&dn->dn_mtx);
2280 space = DN_USED_BYTES(dn->dn_phys);
2281 if (delta > 0) {
2282 ASSERT3U(space + delta, >=, space); /* no overflow */
2283 } else {
2284 ASSERT3U(space, >=, -delta); /* no underflow */
2285 }
2286 space += delta;
2287 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
2288 ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
2289 ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
2290 dn->dn_phys->dn_used = space >> DEV_BSHIFT;
2291 } else {
2292 dn->dn_phys->dn_used = space;
2293 dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
2294 }
2295 mutex_exit(&dn->dn_mtx);
2296 }
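
/*
 * Reader-side sketch of the same accounting (roughly what the
 * DN_USED_BYTES() macro provides): pools older than SPA_VERSION_DNODE_BYTES
 * store dn_used in 512-byte sectors, while newer pools store raw bytes and
 * set DNODE_FLAG_USED_BYTES.
 */
#if 0
static uint64_t
example_used_bytes(const dnode_phys_t *dnp)
{
	if (dnp->dn_flags & DNODE_FLAG_USED_BYTES)
		return (dnp->dn_used);
	return (dnp->dn_used << DEV_BSHIFT);	/* sectors -> bytes */
}
#endif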
2297
2298 /*
2299 * Scans a block at the indicated "level" looking for a hole or data,
2300 * depending on 'flags'.
2301 *
2302 * If level > 0, then we are scanning an indirect block looking at its
2303 * pointers. If level == 0, then we are looking at a block of dnodes.
2304 *
2305 * If we don't find what we are looking for in the block, we return ESRCH.
2306 * Otherwise, return with *offset pointing to the beginning (if searching
2307 * forwards) or end (if searching backwards) of the range covered by the
2308 * block pointer we matched on (or dnode).
2309 *
2310 * The basic search algorithm used below by dnode_next_offset() is to
2311 * use this function to search up the block tree (widen the search) until
2312 * we find something (i.e., we don't return ESRCH) and then search back
2313 * down the tree (narrow the search) until we reach our original search
2314 * level.
2315 */
2316 static int
2317 dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
2318 int lvl, uint64_t blkfill, uint64_t txg)
2319 {
2320 dmu_buf_impl_t *db = NULL;
2321 void *data = NULL;
2322 uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2323 uint64_t epb = 1ULL << epbs;
2324 uint64_t minfill, maxfill;
2325 boolean_t hole;
2326 int i, inc, error, span;
2327
2328 dprintf("probing object %llu offset %llx level %d of %u\n",
2329 dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);
2330
2331 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2332
2333 hole = ((flags & DNODE_FIND_HOLE) != 0);
2334 inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
2335 ASSERT(txg == 0 || !hole);
2336
2337 if (lvl == dn->dn_phys->dn_nlevels) {
2338 error = 0;
2339 epb = dn->dn_phys->dn_nblkptr;
2340 data = dn->dn_phys->dn_blkptr;
2341 } else {
2342 uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
2343 error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
2344 if (error) {
2345 if (error != ENOENT)
2346 return (error);
2347 if (hole)
2348 return (0);
2349 /*
2350 * This can only happen when we are searching up
2351 * the block tree for data. We don't really need to
2352 * adjust the offset, as we will just end up looking
2353 * at the pointer to this block in its parent, and its
2354 * going to be unallocated, so we will skip over it.
2355 */
2356 return (SET_ERROR(ESRCH));
2357 }
2358 error = dbuf_read(db, NULL,
2359 DB_RF_CANFAIL | DB_RF_HAVESTRUCT | DB_RF_NO_DECRYPT);
2360 if (error) {
2361 dbuf_rele(db, FTAG);
2362 return (error);
2363 }
2364 data = db->db.db_data;
2365 rw_enter(&db->db_rwlock, RW_READER);
2366 }
2367
2368 if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
2369 db->db_blkptr->blk_birth <= txg ||
2370 BP_IS_HOLE(db->db_blkptr))) {
2371 /*
2372 * This can only happen when we are searching up the tree
2373 * and these conditions mean that we need to keep climbing.
2374 */
2375 error = SET_ERROR(ESRCH);
2376 } else if (lvl == 0) {
2377 dnode_phys_t *dnp = data;
2378
2379 ASSERT(dn->dn_type == DMU_OT_DNODE);
2380 ASSERT(!(flags & DNODE_FIND_BACKWARDS));
2381
2382 for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
2383 i < blkfill; i += dnp[i].dn_extra_slots + 1) {
2384 if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
2385 break;
2386 }
2387
2388 if (i == blkfill)
2389 error = SET_ERROR(ESRCH);
2390
2391 *offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
2392 (i << DNODE_SHIFT);
2393 } else {
2394 blkptr_t *bp = data;
2395 uint64_t start = *offset;
2396 span = (lvl - 1) * epbs + dn->dn_datablkshift;
2397 minfill = 0;
2398 maxfill = blkfill << ((lvl - 1) * epbs);
2399
2400 if (hole)
2401 maxfill--;
2402 else
2403 minfill++;
2404
2405 *offset = *offset >> span;
2406 for (i = BF64_GET(*offset, 0, epbs);
2407 i >= 0 && i < epb; i += inc) {
2408 if (BP_GET_FILL(&bp[i]) >= minfill &&
2409 BP_GET_FILL(&bp[i]) <= maxfill &&
2410 (hole || bp[i].blk_birth > txg))
2411 break;
2412 if (inc > 0 || *offset > 0)
2413 *offset += inc;
2414 }
2415 *offset = *offset << span;
2416 if (inc < 0) {
2417 /* traversing backwards; position offset at the end */
2418 ASSERT3U(*offset, <=, start);
2419 *offset = MIN(*offset + (1ULL << span) - 1, start);
2420 } else if (*offset < start) {
2421 *offset = start;
2422 }
2423 if (i < 0 || i >= epb)
2424 error = SET_ERROR(ESRCH);
2425 }
2426
2427 if (db != NULL) {
2428 rw_exit(&db->db_rwlock);
2429 dbuf_rele(db, FTAG);
2430 }
2431
2432 return (error);
2433 }
2434
2435 /*
2436 * Find the next hole, data, or sparse region at or after *offset.
2437 * The value 'blkfill' tells us how many items we expect to find
2438 * in an L0 data block; this value is 1 for normal objects,
2439 * DNODES_PER_BLOCK for the meta dnode, and some fraction of
2440 * DNODES_PER_BLOCK when searching for sparse regions thereof.
2441 *
2442 * Examples:
2443 *
2444 * dnode_next_offset(dn, flags, offset, 1, 1, 0);
2445 * Finds the next/previous hole/data in a file.
2446 * Used in dmu_offset_next().
2447 *
2448 * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
2449 * Finds the next free/allocated dnode in an objset's meta-dnode.
2450 * Only finds objects that have new contents since txg (i.e.
2451 * bonus buffer changes and content removal are ignored).
2452 * Used in dmu_object_next().
2453 *
2454 * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
2455 * Finds the next L2 meta-dnode bp that's at most 1/4 full.
2456 * Used in dmu_object_alloc().
2457 */
2458 int
2459 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
2460 int minlvl, uint64_t blkfill, uint64_t txg)
2461 {
2462 uint64_t initial_offset = *offset;
2463 int lvl, maxlvl;
2464 int error = 0;
2465
2466 if (!(flags & DNODE_FIND_HAVELOCK))
2467 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2468
2469 if (dn->dn_phys->dn_nlevels == 0) {
2470 error = SET_ERROR(ESRCH);
2471 goto out;
2472 }
2473
2474 if (dn->dn_datablkshift == 0) {
2475 if (*offset < dn->dn_datablksz) {
2476 if (flags & DNODE_FIND_HOLE)
2477 *offset = dn->dn_datablksz;
2478 } else {
2479 error = SET_ERROR(ESRCH);
2480 }
2481 goto out;
2482 }
2483
2484 maxlvl = dn->dn_phys->dn_nlevels;
2485
2486 for (lvl = minlvl; lvl <= maxlvl; lvl++) {
2487 error = dnode_next_offset_level(dn,
2488 flags, offset, lvl, blkfill, txg);
2489 if (error != ESRCH)
2490 break;
2491 }
2492
2493 while (error == 0 && --lvl >= minlvl) {
2494 error = dnode_next_offset_level(dn,
2495 flags, offset, lvl, blkfill, txg);
2496 }
2497
2498 /*
2499 * There's always a "virtual hole" at the end of the object, even
2500 * if all BP's which physically exist are non-holes.
2501 */
2502 if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
2503 minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
2504 error = 0;
2505 }
2506
2507 if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
2508 initial_offset < *offset : initial_offset > *offset))
2509 error = SET_ERROR(ESRCH);
2510 out:
2511 if (!(flags & DNODE_FIND_HAVELOCK))
2512 rw_exit(&dn->dn_struct_rwlock);
2513
2514 return (error);
2515 }
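
/*
 * Usage sketch (hypothetical wrapper, in the spirit of the first example in
 * the comment above): advance *off to the next hole or data region of a
 * held dnode.  ESRCH means nothing further exists in that direction.
 */
#if 0
static int
example_offset_next(dnode_t *dn, boolean_t hole, uint64_t *off)
{
	int flags = DNODE_FIND_HAVELOCK | (hole ? DNODE_FIND_HOLE : 0);

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	return (dnode_next_offset(dn, flags, off, 1, 1, 0));
}
#endif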
2516