1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26 */
27
28 #include <sys/zfs_context.h>
29 #include <sys/dbuf.h>
30 #include <sys/dnode.h>
31 #include <sys/dmu.h>
32 #include <sys/dmu_impl.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/dmu_objset.h>
35 #include <sys/dsl_dir.h>
36 #include <sys/dsl_dataset.h>
37 #include <sys/spa.h>
38 #include <sys/zio.h>
39 #include <sys/dmu_zfetch.h>
40 #include <sys/range_tree.h>
41 #include <sys/trace_zfs.h>
42 #include <sys/zfs_project.h>
43
44 dnode_stats_t dnode_stats = {
45 { "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
46 { "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
47 { "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
48 { "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
49 { "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
50 { "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
51 { "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
52 { "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
53 { "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
54 { "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
55 { "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
56 { "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
57 { "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
58 { "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
59 { "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
60 { "dnode_allocate", KSTAT_DATA_UINT64 },
61 { "dnode_reallocate", KSTAT_DATA_UINT64 },
62 { "dnode_buf_evict", KSTAT_DATA_UINT64 },
63 { "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
64 { "dnode_alloc_race", KSTAT_DATA_UINT64 },
65 { "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
66 { "dnode_move_invalid", KSTAT_DATA_UINT64 },
67 { "dnode_move_recheck1", KSTAT_DATA_UINT64 },
68 { "dnode_move_recheck2", KSTAT_DATA_UINT64 },
69 { "dnode_move_special", KSTAT_DATA_UINT64 },
70 { "dnode_move_handle", KSTAT_DATA_UINT64 },
71 { "dnode_move_rwlock", KSTAT_DATA_UINT64 },
72 { "dnode_move_active", KSTAT_DATA_UINT64 },
73 };
74
75 dnode_sums_t dnode_sums;
76
77 static kstat_t *dnode_ksp;
78 static kmem_cache_t *dnode_cache;
79
80 static dnode_phys_t dnode_phys_zero __maybe_unused;
81
82 int zfs_default_bs = SPA_MINBLOCKSHIFT;
83 int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
84
85 #ifdef _KERNEL
86 static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
87 #endif /* _KERNEL */
88
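/*
 * Comparator for the per-dnode AVL tree of dbufs (dn_dbufs): order by
 * level, then by block id, with DB_MARKER and DB_SEARCH sentinel entries
 * ordered relative to real dbufs so traversals and lookups can use them.
 */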
89 static int
90 dbuf_compare(const void *x1, const void *x2)
91 {
92 const dmu_buf_impl_t *d1 = x1;
93 const dmu_buf_impl_t *d2 = x2;
94
95 int cmp = TREE_CMP(d1->db_level, d2->db_level);
96 if (likely(cmp))
97 return (cmp);
98
99 cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
100 if (likely(cmp))
101 return (cmp);
102
103 if (d1->db_state == DB_MARKER) {
104 ASSERT3S(d2->db_state, !=, DB_MARKER);
105 return (TREE_PCMP(d1->db_parent, d2));
106 } else if (d2->db_state == DB_MARKER) {
107 ASSERT3S(d1->db_state, !=, DB_MARKER);
108 return (TREE_PCMP(d1, d2->db_parent));
109 }
110
111 if (d1->db_state == DB_SEARCH) {
112 ASSERT3S(d2->db_state, !=, DB_SEARCH);
113 return (-1);
114 } else if (d2->db_state == DB_SEARCH) {
115 ASSERT3S(d1->db_state, !=, DB_SEARCH);
116 return (1);
117 }
118
119 return (TREE_PCMP(d1, d2));
120 }
121
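/*
 * Kmem cache constructor: initialize the locks, refcounts, per-txg lists,
 * and dirty-state fields of a dnode_t to a known-clean state.
 */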
122 static int
123 dnode_cons(void *arg, void *unused, int kmflag)
124 {
125 (void) unused, (void) kmflag;
126 dnode_t *dn = arg;
127
128 rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
129 mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
130 mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
131 cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
132 cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
133
134 /*
135 * Every dbuf has a reference, and dropping a tracked reference is
136 * O(number of references), so don't track dn_holds.
137 */
138 zfs_refcount_create_untracked(&dn->dn_holds);
139 zfs_refcount_create(&dn->dn_tx_holds);
140 list_link_init(&dn->dn_link);
141
142 memset(dn->dn_next_type, 0, sizeof (dn->dn_next_type));
143 memset(dn->dn_next_nblkptr, 0, sizeof (dn->dn_next_nblkptr));
144 memset(dn->dn_next_nlevels, 0, sizeof (dn->dn_next_nlevels));
145 memset(dn->dn_next_indblkshift, 0, sizeof (dn->dn_next_indblkshift));
146 memset(dn->dn_next_bonustype, 0, sizeof (dn->dn_next_bonustype));
147 memset(dn->dn_rm_spillblk, 0, sizeof (dn->dn_rm_spillblk));
148 memset(dn->dn_next_bonuslen, 0, sizeof (dn->dn_next_bonuslen));
149 memset(dn->dn_next_blksz, 0, sizeof (dn->dn_next_blksz));
150 memset(dn->dn_next_maxblkid, 0, sizeof (dn->dn_next_maxblkid));
151
152 for (int i = 0; i < TXG_SIZE; i++) {
153 multilist_link_init(&dn->dn_dirty_link[i]);
154 dn->dn_free_ranges[i] = NULL;
155 list_create(&dn->dn_dirty_records[i],
156 sizeof (dbuf_dirty_record_t),
157 offsetof(dbuf_dirty_record_t, dr_dirty_node));
158 }
159
160 dn->dn_allocated_txg = 0;
161 dn->dn_free_txg = 0;
162 dn->dn_assigned_txg = 0;
163 dn->dn_dirty_txg = 0;
164 dn->dn_dirtyctx = 0;
165 dn->dn_dirtyctx_firstset = NULL;
166 dn->dn_bonus = NULL;
167 dn->dn_have_spill = B_FALSE;
168 dn->dn_zio = NULL;
169 dn->dn_oldused = 0;
170 dn->dn_oldflags = 0;
171 dn->dn_olduid = 0;
172 dn->dn_oldgid = 0;
173 dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
174 dn->dn_newuid = 0;
175 dn->dn_newgid = 0;
176 dn->dn_newprojid = ZFS_DEFAULT_PROJID;
177 dn->dn_id_flags = 0;
178
179 dn->dn_dbufs_count = 0;
180 avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
181 offsetof(dmu_buf_impl_t, db_link));
182
183 dn->dn_moved = 0;
184 return (0);
185 }
186
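/*
 * Kmem cache destructor: tear down what dnode_cons() created and assert
 * that the dnode was returned to the cache in a clean state.
 */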
187 static void
188 dnode_dest(void *arg, void *unused)
189 {
190 (void) unused;
191 dnode_t *dn = arg;
192
193 rw_destroy(&dn->dn_struct_rwlock);
194 mutex_destroy(&dn->dn_mtx);
195 mutex_destroy(&dn->dn_dbufs_mtx);
196 cv_destroy(&dn->dn_notxholds);
197 cv_destroy(&dn->dn_nodnholds);
198 zfs_refcount_destroy(&dn->dn_holds);
199 zfs_refcount_destroy(&dn->dn_tx_holds);
200 ASSERT(!list_link_active(&dn->dn_link));
201
202 for (int i = 0; i < TXG_SIZE; i++) {
203 ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
204 ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
205 list_destroy(&dn->dn_dirty_records[i]);
206 ASSERT0(dn->dn_next_nblkptr[i]);
207 ASSERT0(dn->dn_next_nlevels[i]);
208 ASSERT0(dn->dn_next_indblkshift[i]);
209 ASSERT0(dn->dn_next_bonustype[i]);
210 ASSERT0(dn->dn_rm_spillblk[i]);
211 ASSERT0(dn->dn_next_bonuslen[i]);
212 ASSERT0(dn->dn_next_blksz[i]);
213 ASSERT0(dn->dn_next_maxblkid[i]);
214 }
215
216 ASSERT0(dn->dn_allocated_txg);
217 ASSERT0(dn->dn_free_txg);
218 ASSERT0(dn->dn_assigned_txg);
219 ASSERT0(dn->dn_dirty_txg);
220 ASSERT0(dn->dn_dirtyctx);
221 ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
222 ASSERT3P(dn->dn_bonus, ==, NULL);
223 ASSERT(!dn->dn_have_spill);
224 ASSERT3P(dn->dn_zio, ==, NULL);
225 ASSERT0(dn->dn_oldused);
226 ASSERT0(dn->dn_oldflags);
227 ASSERT0(dn->dn_olduid);
228 ASSERT0(dn->dn_oldgid);
229 ASSERT0(dn->dn_oldprojid);
230 ASSERT0(dn->dn_newuid);
231 ASSERT0(dn->dn_newgid);
232 ASSERT0(dn->dn_newprojid);
233 ASSERT0(dn->dn_id_flags);
234
235 ASSERT0(dn->dn_dbufs_count);
236 avl_destroy(&dn->dn_dbufs);
237 }
238
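/*
 * kstat update callback: publish the current wmsum counter values through
 * the read-only "dnodestats" kstat.
 */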
239 static int
240 dnode_kstats_update(kstat_t *ksp, int rw)
241 {
242 dnode_stats_t *ds = ksp->ks_data;
243
244 if (rw == KSTAT_WRITE)
245 return (EACCES);
246 ds->dnode_hold_dbuf_hold.value.ui64 =
247 wmsum_value(&dnode_sums.dnode_hold_dbuf_hold);
248 ds->dnode_hold_dbuf_read.value.ui64 =
249 wmsum_value(&dnode_sums.dnode_hold_dbuf_read);
250 ds->dnode_hold_alloc_hits.value.ui64 =
251 wmsum_value(&dnode_sums.dnode_hold_alloc_hits);
252 ds->dnode_hold_alloc_misses.value.ui64 =
253 wmsum_value(&dnode_sums.dnode_hold_alloc_misses);
254 ds->dnode_hold_alloc_interior.value.ui64 =
255 wmsum_value(&dnode_sums.dnode_hold_alloc_interior);
256 ds->dnode_hold_alloc_lock_retry.value.ui64 =
257 wmsum_value(&dnode_sums.dnode_hold_alloc_lock_retry);
258 ds->dnode_hold_alloc_lock_misses.value.ui64 =
259 wmsum_value(&dnode_sums.dnode_hold_alloc_lock_misses);
260 ds->dnode_hold_alloc_type_none.value.ui64 =
261 wmsum_value(&dnode_sums.dnode_hold_alloc_type_none);
262 ds->dnode_hold_free_hits.value.ui64 =
263 wmsum_value(&dnode_sums.dnode_hold_free_hits);
264 ds->dnode_hold_free_misses.value.ui64 =
265 wmsum_value(&dnode_sums.dnode_hold_free_misses);
266 ds->dnode_hold_free_lock_misses.value.ui64 =
267 wmsum_value(&dnode_sums.dnode_hold_free_lock_misses);
268 ds->dnode_hold_free_lock_retry.value.ui64 =
269 wmsum_value(&dnode_sums.dnode_hold_free_lock_retry);
270 ds->dnode_hold_free_refcount.value.ui64 =
271 wmsum_value(&dnode_sums.dnode_hold_free_refcount);
272 ds->dnode_hold_free_overflow.value.ui64 =
273 wmsum_value(&dnode_sums.dnode_hold_free_overflow);
274 ds->dnode_free_interior_lock_retry.value.ui64 =
275 wmsum_value(&dnode_sums.dnode_free_interior_lock_retry);
276 ds->dnode_allocate.value.ui64 =
277 wmsum_value(&dnode_sums.dnode_allocate);
278 ds->dnode_reallocate.value.ui64 =
279 wmsum_value(&dnode_sums.dnode_reallocate);
280 ds->dnode_buf_evict.value.ui64 =
281 wmsum_value(&dnode_sums.dnode_buf_evict);
282 ds->dnode_alloc_next_chunk.value.ui64 =
283 wmsum_value(&dnode_sums.dnode_alloc_next_chunk);
284 ds->dnode_alloc_race.value.ui64 =
285 wmsum_value(&dnode_sums.dnode_alloc_race);
286 ds->dnode_alloc_next_block.value.ui64 =
287 wmsum_value(&dnode_sums.dnode_alloc_next_block);
288 ds->dnode_move_invalid.value.ui64 =
289 wmsum_value(&dnode_sums.dnode_move_invalid);
290 ds->dnode_move_recheck1.value.ui64 =
291 wmsum_value(&dnode_sums.dnode_move_recheck1);
292 ds->dnode_move_recheck2.value.ui64 =
293 wmsum_value(&dnode_sums.dnode_move_recheck2);
294 ds->dnode_move_special.value.ui64 =
295 wmsum_value(&dnode_sums.dnode_move_special);
296 ds->dnode_move_handle.value.ui64 =
297 wmsum_value(&dnode_sums.dnode_move_handle);
298 ds->dnode_move_rwlock.value.ui64 =
299 wmsum_value(&dnode_sums.dnode_move_rwlock);
300 ds->dnode_move_active.value.ui64 =
301 wmsum_value(&dnode_sums.dnode_move_active);
302 return (0);
303 }
304
305 void
306 dnode_init(void)
307 {
308 ASSERT(dnode_cache == NULL);
309 dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
310 0, dnode_cons, dnode_dest, NULL, NULL, NULL, KMC_RECLAIMABLE);
311 kmem_cache_set_move(dnode_cache, dnode_move);
312
313 wmsum_init(&dnode_sums.dnode_hold_dbuf_hold, 0);
314 wmsum_init(&dnode_sums.dnode_hold_dbuf_read, 0);
315 wmsum_init(&dnode_sums.dnode_hold_alloc_hits, 0);
316 wmsum_init(&dnode_sums.dnode_hold_alloc_misses, 0);
317 wmsum_init(&dnode_sums.dnode_hold_alloc_interior, 0);
318 wmsum_init(&dnode_sums.dnode_hold_alloc_lock_retry, 0);
319 wmsum_init(&dnode_sums.dnode_hold_alloc_lock_misses, 0);
320 wmsum_init(&dnode_sums.dnode_hold_alloc_type_none, 0);
321 wmsum_init(&dnode_sums.dnode_hold_free_hits, 0);
322 wmsum_init(&dnode_sums.dnode_hold_free_misses, 0);
323 wmsum_init(&dnode_sums.dnode_hold_free_lock_misses, 0);
324 wmsum_init(&dnode_sums.dnode_hold_free_lock_retry, 0);
325 wmsum_init(&dnode_sums.dnode_hold_free_refcount, 0);
326 wmsum_init(&dnode_sums.dnode_hold_free_overflow, 0);
327 wmsum_init(&dnode_sums.dnode_free_interior_lock_retry, 0);
328 wmsum_init(&dnode_sums.dnode_allocate, 0);
329 wmsum_init(&dnode_sums.dnode_reallocate, 0);
330 wmsum_init(&dnode_sums.dnode_buf_evict, 0);
331 wmsum_init(&dnode_sums.dnode_alloc_next_chunk, 0);
332 wmsum_init(&dnode_sums.dnode_alloc_race, 0);
333 wmsum_init(&dnode_sums.dnode_alloc_next_block, 0);
334 wmsum_init(&dnode_sums.dnode_move_invalid, 0);
335 wmsum_init(&dnode_sums.dnode_move_recheck1, 0);
336 wmsum_init(&dnode_sums.dnode_move_recheck2, 0);
337 wmsum_init(&dnode_sums.dnode_move_special, 0);
338 wmsum_init(&dnode_sums.dnode_move_handle, 0);
339 wmsum_init(&dnode_sums.dnode_move_rwlock, 0);
340 wmsum_init(&dnode_sums.dnode_move_active, 0);
341
342 dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
343 KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
344 KSTAT_FLAG_VIRTUAL);
345 if (dnode_ksp != NULL) {
346 dnode_ksp->ks_data = &dnode_stats;
347 dnode_ksp->ks_update = dnode_kstats_update;
348 kstat_install(dnode_ksp);
349 }
350 }
351
352 void
353 dnode_fini(void)
354 {
355 if (dnode_ksp != NULL) {
356 kstat_delete(dnode_ksp);
357 dnode_ksp = NULL;
358 }
359
360 wmsum_fini(&dnode_sums.dnode_hold_dbuf_hold);
361 wmsum_fini(&dnode_sums.dnode_hold_dbuf_read);
362 wmsum_fini(&dnode_sums.dnode_hold_alloc_hits);
363 wmsum_fini(&dnode_sums.dnode_hold_alloc_misses);
364 wmsum_fini(&dnode_sums.dnode_hold_alloc_interior);
365 wmsum_fini(&dnode_sums.dnode_hold_alloc_lock_retry);
366 wmsum_fini(&dnode_sums.dnode_hold_alloc_lock_misses);
367 wmsum_fini(&dnode_sums.dnode_hold_alloc_type_none);
368 wmsum_fini(&dnode_sums.dnode_hold_free_hits);
369 wmsum_fini(&dnode_sums.dnode_hold_free_misses);
370 wmsum_fini(&dnode_sums.dnode_hold_free_lock_misses);
371 wmsum_fini(&dnode_sums.dnode_hold_free_lock_retry);
372 wmsum_fini(&dnode_sums.dnode_hold_free_refcount);
373 wmsum_fini(&dnode_sums.dnode_hold_free_overflow);
374 wmsum_fini(&dnode_sums.dnode_free_interior_lock_retry);
375 wmsum_fini(&dnode_sums.dnode_allocate);
376 wmsum_fini(&dnode_sums.dnode_reallocate);
377 wmsum_fini(&dnode_sums.dnode_buf_evict);
378 wmsum_fini(&dnode_sums.dnode_alloc_next_chunk);
379 wmsum_fini(&dnode_sums.dnode_alloc_race);
380 wmsum_fini(&dnode_sums.dnode_alloc_next_block);
381 wmsum_fini(&dnode_sums.dnode_move_invalid);
382 wmsum_fini(&dnode_sums.dnode_move_recheck1);
383 wmsum_fini(&dnode_sums.dnode_move_recheck2);
384 wmsum_fini(&dnode_sums.dnode_move_special);
385 wmsum_fini(&dnode_sums.dnode_move_handle);
386 wmsum_fini(&dnode_sums.dnode_move_rwlock);
387 wmsum_fini(&dnode_sums.dnode_move_active);
388
389 kmem_cache_destroy(dnode_cache);
390 dnode_cache = NULL;
391 }
392
393
394 #ifdef ZFS_DEBUG
395 void
396 dnode_verify(dnode_t *dn)
397 {
398 int drop_struct_lock = FALSE;
399
400 ASSERT(dn->dn_phys);
401 ASSERT(dn->dn_objset);
402 ASSERT(dn->dn_handle->dnh_dnode == dn);
403
404 ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
405
406 if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
407 return;
408
409 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
410 rw_enter(&dn->dn_struct_rwlock, RW_READER);
411 drop_struct_lock = TRUE;
412 }
413 if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
414 int i;
415 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
416 ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
417 if (dn->dn_datablkshift) {
418 ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
419 ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
420 ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
421 }
422 ASSERT3U(dn->dn_nlevels, <=, 30);
423 ASSERT(DMU_OT_IS_VALID(dn->dn_type));
424 ASSERT3U(dn->dn_nblkptr, >=, 1);
425 ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
426 ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
427 ASSERT3U(dn->dn_datablksz, ==,
428 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
429 ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
430 ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
431 dn->dn_bonuslen, <=, max_bonuslen);
432 for (i = 0; i < TXG_SIZE; i++) {
433 ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
434 }
435 }
436 if (dn->dn_phys->dn_type != DMU_OT_NONE)
437 ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
438 ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
439 if (dn->dn_dbuf != NULL) {
440 ASSERT3P(dn->dn_phys, ==,
441 (dnode_phys_t *)dn->dn_dbuf->db.db_data +
442 (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
443 }
444 if (drop_struct_lock)
445 rw_exit(&dn->dn_struct_rwlock);
446 }
447 #endif
448
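/*
 * Byteswap a single on-disk dnode in place, including its block pointers,
 * bonus buffer, and (if present) spill block pointer.
 */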
449 void
450 dnode_byteswap(dnode_phys_t *dnp)
451 {
452 uint64_t *buf64 = (void*)&dnp->dn_blkptr;
453 int i;
454
455 if (dnp->dn_type == DMU_OT_NONE) {
456 memset(dnp, 0, sizeof (dnode_phys_t));
457 return;
458 }
459
460 dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
461 dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
462 dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
463 dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
464 dnp->dn_used = BSWAP_64(dnp->dn_used);
465
466 /*
467 * dn_nblkptr is only one byte, so it's OK to read it in either
468 * byte order. We can't read dn_bonuslen.
469 */
470 ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
471 ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
472 for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
473 buf64[i] = BSWAP_64(buf64[i]);
474
475 /*
476 * OK to check dn_bonuslen for zero, because it won't matter if
477 * we have the wrong byte order. This is necessary because the
478 * dnode dnode is smaller than a regular dnode.
479 */
480 if (dnp->dn_bonuslen != 0) {
481 dmu_object_byteswap_t byteswap;
482 ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
483 byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
484 dmu_ot_byteswap[byteswap].ob_func(DN_BONUS(dnp),
485 DN_MAX_BONUS_LEN(dnp));
486 }
487
488 /* Swap SPILL block if we have one */
489 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
490 byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
491 }
492
493 void
494 dnode_buf_byteswap(void *vbuf, size_t size)
495 {
496 int i = 0;
497
498 ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
499 ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
500
501 while (i < size) {
502 dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
503 dnode_byteswap(dnp);
504
505 i += DNODE_MIN_SIZE;
506 if (dnp->dn_type != DMU_OT_NONE)
507 i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
508 }
509 }
510
511 void
512 dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
513 {
514 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
515
516 dnode_setdirty(dn, tx);
517 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
518 ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
519 (dn->dn_nblkptr-1) * sizeof (blkptr_t));
520
521 if (newsize < dn->dn_bonuslen) {
522 /* clear any data after the end of the new size */
523 size_t diff = dn->dn_bonuslen - newsize;
524 char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
525 memset(data_end, 0, diff);
526 }
527
528 dn->dn_bonuslen = newsize;
529 if (newsize == 0)
530 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
531 else
532 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
533 rw_exit(&dn->dn_struct_rwlock);
534 }
535
536 void
537 dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
538 {
539 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
540 dnode_setdirty(dn, tx);
541 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
542 dn->dn_bonustype = newtype;
543 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
544 rw_exit(&dn->dn_struct_rwlock);
545 }
546
547 void
548 dnode_set_storage_type(dnode_t *dn, dmu_object_type_t newtype)
549 {
550 /*
551 * This is not in the dnode_phys, but it should be, and perhaps one day
552 * will. For now we require it be set after taking a hold.
553 */
554 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
555 dn->dn_storage_type = newtype;
556 }
557
558 void
559 dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
560 {
561 ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
562 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
563 dnode_setdirty(dn, tx);
564 dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
565 dn->dn_have_spill = B_FALSE;
566 }
567
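/*
 * Set the data block size fields (bytes, 512-byte sectors, and the shift
 * for power-of-two sizes) from the given size.
 */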
568 static void
569 dnode_setdblksz(dnode_t *dn, int size)
570 {
571 ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
572 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
573 ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
574 ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
575 1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
576 dn->dn_datablksz = size;
577 dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
578 dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
579 }
580
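/*
 * Construct an in-core dnode_t for the given on-disk dnode_phys_t and link
 * it into the objset.  The caller must hold the dnode handle's zrlock.
 */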
581 static dnode_t *
582 dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
583 uint64_t object, dnode_handle_t *dnh)
584 {
585 dnode_t *dn;
586
587 dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
588 dn->dn_moved = 0;
589
590 /*
591 * Defer setting dn_objset until the dnode is ready to be a candidate
592 * for the dnode_move() callback.
593 */
594 dn->dn_object = object;
595 dn->dn_dbuf = db;
596 dn->dn_handle = dnh;
597 dn->dn_phys = dnp;
598
599 if (dnp->dn_datablkszsec) {
600 dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
601 } else {
602 dn->dn_datablksz = 0;
603 dn->dn_datablkszsec = 0;
604 dn->dn_datablkshift = 0;
605 }
606 dn->dn_indblkshift = dnp->dn_indblkshift;
607 dn->dn_nlevels = dnp->dn_nlevels;
608 dn->dn_type = dnp->dn_type;
609 dn->dn_nblkptr = dnp->dn_nblkptr;
610 dn->dn_checksum = dnp->dn_checksum;
611 dn->dn_compress = dnp->dn_compress;
612 dn->dn_bonustype = dnp->dn_bonustype;
613 dn->dn_bonuslen = dnp->dn_bonuslen;
614 dn->dn_num_slots = dnp->dn_extra_slots + 1;
615 dn->dn_maxblkid = dnp->dn_maxblkid;
616 dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
617 dn->dn_id_flags = 0;
618
619 dn->dn_storage_type = DMU_OT_NONE;
620
621 dmu_zfetch_init(&dn->dn_zfetch, dn);
622
623 ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
624 ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
625 ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
626
627 mutex_enter(&os->os_lock);
628
629 /*
630 * Exclude special dnodes from os_dnodes so an empty os_dnodes
631 * signifies that the special dnodes have no references from
632 * their children (the entries in os_dnodes). This allows
633 * dnode_destroy() to easily determine if the last child has
634 * been removed and then complete eviction of the objset.
635 */
636 if (!DMU_OBJECT_IS_SPECIAL(object))
637 list_insert_head(&os->os_dnodes, dn);
638 membar_producer();
639
640 /*
641 * Everything else must be valid before assigning dn_objset
642 * makes the dnode eligible for dnode_move().
643 */
644 dn->dn_objset = os;
645
646 dnh->dnh_dnode = dn;
647 mutex_exit(&os->os_lock);
648
649 arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
650
651 return (dn);
652 }
653
654 /*
655 * Caller must be holding the dnode handle, which is released upon return.
656 */
657 static void
658 dnode_destroy(dnode_t *dn)
659 {
660 objset_t *os = dn->dn_objset;
661 boolean_t complete_os_eviction = B_FALSE;
662
663 ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
664
665 mutex_enter(&os->os_lock);
666 POINTER_INVALIDATE(&dn->dn_objset);
667 if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
668 list_remove(&os->os_dnodes, dn);
669 complete_os_eviction =
670 list_is_empty(&os->os_dnodes) &&
671 list_link_active(&os->os_evicting_node);
672 }
673 mutex_exit(&os->os_lock);
674
675 /* the dnode can no longer move, so we can release the handle */
676 if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
677 zrl_remove(&dn->dn_handle->dnh_zrlock);
678
679 dn->dn_allocated_txg = 0;
680 dn->dn_free_txg = 0;
681 dn->dn_assigned_txg = 0;
682 dn->dn_dirty_txg = 0;
683
684 dn->dn_dirtyctx = 0;
685 dn->dn_dirtyctx_firstset = NULL;
686 if (dn->dn_bonus != NULL) {
687 mutex_enter(&dn->dn_bonus->db_mtx);
688 dbuf_destroy(dn->dn_bonus);
689 dn->dn_bonus = NULL;
690 }
691 dn->dn_zio = NULL;
692
693 dn->dn_have_spill = B_FALSE;
694 dn->dn_oldused = 0;
695 dn->dn_oldflags = 0;
696 dn->dn_olduid = 0;
697 dn->dn_oldgid = 0;
698 dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
699 dn->dn_newuid = 0;
700 dn->dn_newgid = 0;
701 dn->dn_newprojid = ZFS_DEFAULT_PROJID;
702 dn->dn_id_flags = 0;
703
704 dn->dn_storage_type = DMU_OT_NONE;
705
706 dmu_zfetch_fini(&dn->dn_zfetch);
707 kmem_cache_free(dnode_cache, dn);
708 arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
709
710 if (complete_os_eviction)
711 dmu_objset_evict_done(os);
712 }
713
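/*
 * Initialize a freshly held, previously free dnode with the given type,
 * block size, indirect block shift, bonus type/length, and slot count,
 * and mark it dirty in the current transaction.
 */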
714 void
715 dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
716 dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
717 {
718 int i;
719
720 ASSERT3U(dn_slots, >, 0);
721 ASSERT3U(dn_slots << DNODE_SHIFT, <=,
722 spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
723 ASSERT3U(blocksize, <=,
724 spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
725 if (blocksize == 0)
726 blocksize = 1 << zfs_default_bs;
727 else
728 blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
729
730 if (ibs == 0)
731 ibs = zfs_default_ibs;
732
733 ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
734
735 dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
736 dn->dn_objset, (u_longlong_t)dn->dn_object,
737 (u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
738 DNODE_STAT_BUMP(dnode_allocate);
739
740 ASSERT(dn->dn_type == DMU_OT_NONE);
741 ASSERT0(memcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)));
742 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
743 ASSERT(ot != DMU_OT_NONE);
744 ASSERT(DMU_OT_IS_VALID(ot));
745 ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
746 (bonustype == DMU_OT_SA && bonuslen == 0) ||
747 (bonustype == DMU_OTN_UINT64_METADATA && bonuslen == 0) ||
748 (bonustype != DMU_OT_NONE && bonuslen != 0));
749 ASSERT(DMU_OT_IS_VALID(bonustype));
750 ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
751 ASSERT(dn->dn_type == DMU_OT_NONE);
752 ASSERT0(dn->dn_maxblkid);
753 ASSERT0(dn->dn_allocated_txg);
754 ASSERT0(dn->dn_assigned_txg);
755 ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
756 ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
757 ASSERT(avl_is_empty(&dn->dn_dbufs));
758
759 for (i = 0; i < TXG_SIZE; i++) {
760 ASSERT0(dn->dn_next_nblkptr[i]);
761 ASSERT0(dn->dn_next_nlevels[i]);
762 ASSERT0(dn->dn_next_indblkshift[i]);
763 ASSERT0(dn->dn_next_bonuslen[i]);
764 ASSERT0(dn->dn_next_bonustype[i]);
765 ASSERT0(dn->dn_rm_spillblk[i]);
766 ASSERT0(dn->dn_next_blksz[i]);
767 ASSERT0(dn->dn_next_maxblkid[i]);
768 ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
769 ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
770 ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
771 }
772
773 dn->dn_type = ot;
774 dnode_setdblksz(dn, blocksize);
775 dn->dn_indblkshift = ibs;
776 dn->dn_nlevels = 1;
777 dn->dn_num_slots = dn_slots;
778 if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
779 dn->dn_nblkptr = 1;
780 else {
781 dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
782 1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
783 SPA_BLKPTRSHIFT));
784 }
785
786 dn->dn_bonustype = bonustype;
787 dn->dn_bonuslen = bonuslen;
788 dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
789 dn->dn_compress = ZIO_COMPRESS_INHERIT;
790 dn->dn_dirtyctx = 0;
791
792 dn->dn_free_txg = 0;
793 dn->dn_dirtyctx_firstset = NULL;
794 dn->dn_dirty_txg = 0;
795
796 dn->dn_allocated_txg = tx->tx_txg;
797 dn->dn_id_flags = 0;
798
799 dnode_setdirty(dn, tx);
800 dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
801 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
802 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
803 dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
804 }
805
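/*
 * Repurpose an existing (allocated) dnode with a new type, block size, and
 * bonus layout, evicting its dbufs and optionally removing its spill block.
 */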
806 void
807 dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
808 dmu_object_type_t bonustype, int bonuslen, int dn_slots,
809 boolean_t keep_spill, dmu_tx_t *tx)
810 {
811 int nblkptr;
812
813 ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
814 ASSERT3U(blocksize, <=,
815 spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
816 ASSERT0(blocksize % SPA_MINBLOCKSIZE);
817 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
818 ASSERT(tx->tx_txg != 0);
819 ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
820 (bonustype != DMU_OT_NONE && bonuslen != 0) ||
821 (bonustype == DMU_OT_SA && bonuslen == 0));
822 ASSERT(DMU_OT_IS_VALID(bonustype));
823 ASSERT3U(bonuslen, <=,
824 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
825 ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
826
827 dnode_free_interior_slots(dn);
828 DNODE_STAT_BUMP(dnode_reallocate);
829
830 /* clean up any unreferenced dbufs */
831 dnode_evict_dbufs(dn);
832
833 dn->dn_id_flags = 0;
834
835 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
836 dnode_setdirty(dn, tx);
837 if (dn->dn_datablksz != blocksize) {
838 /* change blocksize */
839 ASSERT0(dn->dn_maxblkid);
840 ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
841 dnode_block_freed(dn, 0));
842
843 dnode_setdblksz(dn, blocksize);
844 dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
845 }
846 if (dn->dn_bonuslen != bonuslen)
847 dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
848
849 if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
850 nblkptr = 1;
851 else
852 nblkptr = MIN(DN_MAX_NBLKPTR,
853 1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
854 SPA_BLKPTRSHIFT));
855 if (dn->dn_bonustype != bonustype)
856 dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
857 if (dn->dn_nblkptr != nblkptr)
858 dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
859 if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
860 dbuf_rm_spill(dn, tx);
861 dnode_rm_spill(dn, tx);
862 }
863
864 rw_exit(&dn->dn_struct_rwlock);
865
866 /* change type */
867 dn->dn_type = ot;
868
869 /* change bonus size and type */
870 mutex_enter(&dn->dn_mtx);
871 dn->dn_bonustype = bonustype;
872 dn->dn_bonuslen = bonuslen;
873 dn->dn_num_slots = dn_slots;
874 dn->dn_nblkptr = nblkptr;
875 dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
876 dn->dn_compress = ZIO_COMPRESS_INHERIT;
877 ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
878
879 /* fix up the bonus db_size */
880 if (dn->dn_bonus) {
881 dn->dn_bonus->db.db_size =
882 DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
883 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
884 ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
885 }
886
887 dn->dn_allocated_txg = tx->tx_txg;
888 mutex_exit(&dn->dn_mtx);
889 }
890
891 #ifdef _KERNEL
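/*
 * Copy the contents of the old dnode into the new one, update the handle
 * (which fixes the dbuf back pointers), and invalidate the old dnode so a
 * later dnode_move() callback will not try to move it again.
 */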
892 static void
893 dnode_move_impl(dnode_t *odn, dnode_t *ndn)
894 {
895 ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
896 ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
897 ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
898
899 /* Copy fields. */
900 ndn->dn_objset = odn->dn_objset;
901 ndn->dn_object = odn->dn_object;
902 ndn->dn_dbuf = odn->dn_dbuf;
903 ndn->dn_handle = odn->dn_handle;
904 ndn->dn_phys = odn->dn_phys;
905 ndn->dn_type = odn->dn_type;
906 ndn->dn_bonuslen = odn->dn_bonuslen;
907 ndn->dn_bonustype = odn->dn_bonustype;
908 ndn->dn_nblkptr = odn->dn_nblkptr;
909 ndn->dn_checksum = odn->dn_checksum;
910 ndn->dn_compress = odn->dn_compress;
911 ndn->dn_nlevels = odn->dn_nlevels;
912 ndn->dn_indblkshift = odn->dn_indblkshift;
913 ndn->dn_datablkshift = odn->dn_datablkshift;
914 ndn->dn_datablkszsec = odn->dn_datablkszsec;
915 ndn->dn_datablksz = odn->dn_datablksz;
916 ndn->dn_maxblkid = odn->dn_maxblkid;
917 ndn->dn_num_slots = odn->dn_num_slots;
918 memcpy(ndn->dn_next_type, odn->dn_next_type,
919 sizeof (odn->dn_next_type));
920 memcpy(ndn->dn_next_nblkptr, odn->dn_next_nblkptr,
921 sizeof (odn->dn_next_nblkptr));
922 memcpy(ndn->dn_next_nlevels, odn->dn_next_nlevels,
923 sizeof (odn->dn_next_nlevels));
924 memcpy(ndn->dn_next_indblkshift, odn->dn_next_indblkshift,
925 sizeof (odn->dn_next_indblkshift));
926 memcpy(ndn->dn_next_bonustype, odn->dn_next_bonustype,
927 sizeof (odn->dn_next_bonustype));
928 memcpy(ndn->dn_rm_spillblk, odn->dn_rm_spillblk,
929 sizeof (odn->dn_rm_spillblk));
930 memcpy(ndn->dn_next_bonuslen, odn->dn_next_bonuslen,
931 sizeof (odn->dn_next_bonuslen));
932 memcpy(ndn->dn_next_blksz, odn->dn_next_blksz,
933 sizeof (odn->dn_next_blksz));
934 memcpy(ndn->dn_next_maxblkid, odn->dn_next_maxblkid,
935 sizeof (odn->dn_next_maxblkid));
936 for (int i = 0; i < TXG_SIZE; i++) {
937 list_move_tail(&ndn->dn_dirty_records[i],
938 &odn->dn_dirty_records[i]);
939 }
940 memcpy(ndn->dn_free_ranges, odn->dn_free_ranges,
941 sizeof (odn->dn_free_ranges));
942 ndn->dn_allocated_txg = odn->dn_allocated_txg;
943 ndn->dn_free_txg = odn->dn_free_txg;
944 ndn->dn_assigned_txg = odn->dn_assigned_txg;
945 ndn->dn_dirty_txg = odn->dn_dirty_txg;
946 ndn->dn_dirtyctx = odn->dn_dirtyctx;
947 ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
948 ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
949 zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
950 ASSERT(avl_is_empty(&ndn->dn_dbufs));
951 avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
952 ndn->dn_dbufs_count = odn->dn_dbufs_count;
953 ndn->dn_bonus = odn->dn_bonus;
954 ndn->dn_have_spill = odn->dn_have_spill;
955 ndn->dn_zio = odn->dn_zio;
956 ndn->dn_oldused = odn->dn_oldused;
957 ndn->dn_oldflags = odn->dn_oldflags;
958 ndn->dn_olduid = odn->dn_olduid;
959 ndn->dn_oldgid = odn->dn_oldgid;
960 ndn->dn_oldprojid = odn->dn_oldprojid;
961 ndn->dn_newuid = odn->dn_newuid;
962 ndn->dn_newgid = odn->dn_newgid;
963 ndn->dn_newprojid = odn->dn_newprojid;
964 ndn->dn_id_flags = odn->dn_id_flags;
965 ndn->dn_storage_type = odn->dn_storage_type;
966 dmu_zfetch_init(&ndn->dn_zfetch, ndn);
967
968 /*
969 * Update back pointers. Updating the handle fixes the back pointer of
970 * every descendant dbuf as well as the bonus dbuf.
971 */
972 ASSERT(ndn->dn_handle->dnh_dnode == odn);
973 ndn->dn_handle->dnh_dnode = ndn;
974
975 /*
976 * Invalidate the original dnode by clearing all of its back pointers.
977 */
978 odn->dn_dbuf = NULL;
979 odn->dn_handle = NULL;
980 avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
981 offsetof(dmu_buf_impl_t, db_link));
982 odn->dn_dbufs_count = 0;
983 odn->dn_bonus = NULL;
984 dmu_zfetch_fini(&odn->dn_zfetch);
985
986 /*
987 * Set the low bit of the objset pointer to ensure that dnode_move()
988 * recognizes the dnode as invalid in any subsequent callback.
989 */
990 POINTER_INVALIDATE(&odn->dn_objset);
991
992 /*
993 * Satisfy the destructor.
994 */
995 for (int i = 0; i < TXG_SIZE; i++) {
996 list_create(&odn->dn_dirty_records[i],
997 sizeof (dbuf_dirty_record_t),
998 offsetof(dbuf_dirty_record_t, dr_dirty_node));
999 odn->dn_free_ranges[i] = NULL;
1000 odn->dn_next_nlevels[i] = 0;
1001 odn->dn_next_indblkshift[i] = 0;
1002 odn->dn_next_bonustype[i] = 0;
1003 odn->dn_rm_spillblk[i] = 0;
1004 odn->dn_next_bonuslen[i] = 0;
1005 odn->dn_next_blksz[i] = 0;
1006 }
1007 odn->dn_allocated_txg = 0;
1008 odn->dn_free_txg = 0;
1009 odn->dn_assigned_txg = 0;
1010 odn->dn_dirty_txg = 0;
1011 odn->dn_dirtyctx = 0;
1012 odn->dn_dirtyctx_firstset = NULL;
1013 odn->dn_have_spill = B_FALSE;
1014 odn->dn_zio = NULL;
1015 odn->dn_oldused = 0;
1016 odn->dn_oldflags = 0;
1017 odn->dn_olduid = 0;
1018 odn->dn_oldgid = 0;
1019 odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
1020 odn->dn_newuid = 0;
1021 odn->dn_newgid = 0;
1022 odn->dn_newprojid = ZFS_DEFAULT_PROJID;
1023 odn->dn_id_flags = 0;
1024 odn->dn_storage_type = DMU_OT_NONE;
1025
1026 /*
1027 * Mark the dnode.
1028 */
1029 ndn->dn_moved = 1;
1030 odn->dn_moved = (uint8_t)-1;
1031 }
1032
1033 static kmem_cbrc_t
1034 dnode_move(void *buf, void *newbuf, size_t size, void *arg)
1035 {
1036 dnode_t *odn = buf, *ndn = newbuf;
1037 objset_t *os;
1038 int64_t refcount;
1039 uint32_t dbufs;
1040
1041 #ifndef USE_DNODE_HANDLE
1042 /*
1043 * We can't move dnodes if dbufs reference them directly without
1044 * using handles and the respective locking. Unless USE_DNODE_HANDLE
1045 * is defined, the code below exists only to make sure it still builds,
1046 * but it should never be used, since it is unsafe.
1047 */
1048 #ifdef ZFS_DEBUG
1049 PANIC("dnode_move() called without USE_DNODE_HANDLE");
1050 #endif
1051 return (KMEM_CBRC_NO);
1052 #endif
1053
1054 /*
1055 * The dnode is on the objset's list of known dnodes if the objset
1056 * pointer is valid. We set the low bit of the objset pointer when
1057 * freeing the dnode to invalidate it, and the memory patterns written
1058 * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
1059 * A newly created dnode sets the objset pointer last of all to indicate
1060 * that the dnode is known and in a valid state to be moved by this
1061 * function.
1062 */
1063 os = odn->dn_objset;
1064 if (!POINTER_IS_VALID(os)) {
1065 DNODE_STAT_BUMP(dnode_move_invalid);
1066 return (KMEM_CBRC_DONT_KNOW);
1067 }
1068
1069 /*
1070 * Ensure that the objset does not go away during the move.
1071 */
1072 rw_enter(&os_lock, RW_WRITER);
1073 if (os != odn->dn_objset) {
1074 rw_exit(&os_lock);
1075 DNODE_STAT_BUMP(dnode_move_recheck1);
1076 return (KMEM_CBRC_DONT_KNOW);
1077 }
1078
1079 /*
1080 * If the dnode is still valid, then so is the objset. We know that no
1081 * valid objset can be freed while we hold os_lock, so we can safely
1082 * ensure that the objset remains in use.
1083 */
1084 mutex_enter(&os->os_lock);
1085
1086 /*
1087 * Recheck the objset pointer in case the dnode was removed just before
1088 * acquiring the lock.
1089 */
1090 if (os != odn->dn_objset) {
1091 mutex_exit(&os->os_lock);
1092 rw_exit(&os_lock);
1093 DNODE_STAT_BUMP(dnode_move_recheck2);
1094 return (KMEM_CBRC_DONT_KNOW);
1095 }
1096
1097 /*
1098 * At this point we know that as long as we hold os->os_lock, the dnode
1099 * cannot be freed and fields within the dnode can be safely accessed.
1100 * The objset listing this dnode cannot go away as long as this dnode is
1101 * on its list.
1102 */
1103 rw_exit(&os_lock);
1104 if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
1105 mutex_exit(&os->os_lock);
1106 DNODE_STAT_BUMP(dnode_move_special);
1107 return (KMEM_CBRC_NO);
1108 }
1109 ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
1110
1111 /*
1112 * Lock the dnode handle to prevent the dnode from obtaining any new
1113 * holds. This also prevents the descendant dbufs and the bonus dbuf
1114 * from accessing the dnode, so that we can discount their holds. The
1115 * handle is safe to access because we know that while the dnode cannot
1116 * go away, neither can its handle. Once we hold dnh_zrlock, we can
1117 * safely move any dnode referenced only by dbufs.
1118 */
1119 if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
1120 mutex_exit(&os->os_lock);
1121 DNODE_STAT_BUMP(dnode_move_handle);
1122 return (KMEM_CBRC_LATER);
1123 }
1124
1125 /*
1126 * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
1127 * We need to guarantee that there is a hold for every dbuf in order to
1128 * determine whether the dnode is actively referenced. Falsely matching
1129 * a dbuf to an active hold would lead to an unsafe move. It's possible
1130 * that a thread already having an active dnode hold is about to add a
1131 * dbuf, and we can't compare hold and dbuf counts while the add is in
1132 * progress.
1133 */
1134 if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
1135 zrl_exit(&odn->dn_handle->dnh_zrlock);
1136 mutex_exit(&os->os_lock);
1137 DNODE_STAT_BUMP(dnode_move_rwlock);
1138 return (KMEM_CBRC_LATER);
1139 }
1140
1141 /*
1142 * A dbuf may be removed (evicted) without an active dnode hold. In that
1143 * case, the dbuf count is decremented under the handle lock before the
1144 * dbuf's hold is released. This order ensures that if we count the hold
1145 * after the dbuf is removed but before its hold is released, we will
1146 * treat the unmatched hold as active and exit safely. If we count the
1147 * hold before the dbuf is removed, the hold is discounted, and the
1148 * removal is blocked until the move completes.
1149 */
1150 refcount = zfs_refcount_count(&odn->dn_holds);
1151 ASSERT(refcount >= 0);
1152 dbufs = DN_DBUFS_COUNT(odn);
1153
1154 /* We can't have more dbufs than dnode holds. */
1155 ASSERT3U(dbufs, <=, refcount);
1156 DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
1157 uint32_t, dbufs);
1158
1159 if (refcount > dbufs) {
1160 rw_exit(&odn->dn_struct_rwlock);
1161 zrl_exit(&odn->dn_handle->dnh_zrlock);
1162 mutex_exit(&os->os_lock);
1163 DNODE_STAT_BUMP(dnode_move_active);
1164 return (KMEM_CBRC_LATER);
1165 }
1166
1167 rw_exit(&odn->dn_struct_rwlock);
1168
1169 /*
1170 * At this point we know that anyone with a hold on the dnode is not
1171 * actively referencing it. The dnode is known and in a valid state to
1172 * move. We're holding the locks needed to execute the critical section.
1173 */
1174 dnode_move_impl(odn, ndn);
1175
1176 list_link_replace(&odn->dn_link, &ndn->dn_link);
1177 /* If the dnode was safe to move, the refcount cannot have changed. */
1178 ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
1179 ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
1180 zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
1181 mutex_exit(&os->os_lock);
1182
1183 return (KMEM_CBRC_YES);
1184 }
1185 #endif /* _KERNEL */
1186
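/*
 * Take a hold (zrl_add) on each dnode handle in the slot range so the slot
 * contents cannot be changed while the caller inspects them.
 */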
1187 static void
1188 dnode_slots_hold(dnode_children_t *children, int idx, int slots)
1189 {
1190 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1191
1192 for (int i = idx; i < idx + slots; i++) {
1193 dnode_handle_t *dnh = &children->dnc_children[i];
1194 zrl_add(&dnh->dnh_zrlock);
1195 }
1196 }
1197
1198 static void
1199 dnode_slots_rele(dnode_children_t *children, int idx, int slots)
1200 {
1201 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1202
1203 for (int i = idx; i < idx + slots; i++) {
1204 dnode_handle_t *dnh = &children->dnc_children[i];
1205
1206 if (zrl_is_locked(&dnh->dnh_zrlock))
1207 zrl_exit(&dnh->dnh_zrlock);
1208 else
1209 zrl_remove(&dnh->dnh_zrlock);
1210 }
1211 }
1212
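/*
 * Try to acquire the zrlock of every handle in the slot range.  On failure,
 * drop any locks already taken and return 0; return 1 on success.
 */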
1213 static int
1214 dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
1215 {
1216 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1217
1218 for (int i = idx; i < idx + slots; i++) {
1219 dnode_handle_t *dnh = &children->dnc_children[i];
1220
1221 if (!zrl_tryenter(&dnh->dnh_zrlock)) {
1222 for (int j = idx; j < i; j++) {
1223 dnh = &children->dnc_children[j];
1224 zrl_exit(&dnh->dnh_zrlock);
1225 }
1226
1227 return (0);
1228 }
1229 }
1230
1231 return (1);
1232 }
1233
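/*
 * Set every handle in the slot range to the given value (e.g. DN_SLOT_FREE,
 * DN_SLOT_INTERIOR, or DN_SLOT_ALLOCATED).
 */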
1234 static void
1235 dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
1236 {
1237 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1238
1239 for (int i = idx; i < idx + slots; i++) {
1240 dnode_handle_t *dnh = &children->dnc_children[i];
1241 dnh->dnh_dnode = ptr;
1242 }
1243 }
1244
1245 static boolean_t
1246 dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
1247 {
1248 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1249
1250 /*
1251 * If all dnode slots are either already free or
1252 * evictable return B_TRUE.
1253 */
1254 for (int i = idx; i < idx + slots; i++) {
1255 dnode_handle_t *dnh = &children->dnc_children[i];
1256 dnode_t *dn = dnh->dnh_dnode;
1257
1258 if (dn == DN_SLOT_FREE) {
1259 continue;
1260 } else if (DN_SLOT_IS_PTR(dn)) {
1261 mutex_enter(&dn->dn_mtx);
1262 boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
1263 zfs_refcount_is_zero(&dn->dn_holds) &&
1264 !DNODE_IS_DIRTY(dn));
1265 mutex_exit(&dn->dn_mtx);
1266
1267 if (!can_free)
1268 return (B_FALSE);
1269 else
1270 continue;
1271 } else {
1272 return (B_FALSE);
1273 }
1274 }
1275
1276 return (B_TRUE);
1277 }
1278
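/*
 * Destroy any instantiated but free dnodes in the slot range and mark their
 * slots DN_SLOT_FREE, returning the number reclaimed.  The caller must hold
 * the zrlock of each slot.
 */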
1279 static uint_t
1280 dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
1281 {
1282 uint_t reclaimed = 0;
1283
1284 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1285
1286 for (int i = idx; i < idx + slots; i++) {
1287 dnode_handle_t *dnh = &children->dnc_children[i];
1288
1289 ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
1290
1291 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1292 ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
1293 dnode_destroy(dnh->dnh_dnode);
1294 dnh->dnh_dnode = DN_SLOT_FREE;
1295 reclaimed++;
1296 }
1297 }
1298
1299 return (reclaimed);
1300 }
1301
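/*
 * Mark the interior (extra) slots of a multi-slot dnode as DN_SLOT_FREE so
 * they can be reused once the dnode itself is freed.
 */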
1302 void
1303 dnode_free_interior_slots(dnode_t *dn)
1304 {
1305 dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
1306 int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
1307 int idx = (dn->dn_object & (epb - 1)) + 1;
1308 int slots = dn->dn_num_slots - 1;
1309
1310 if (slots == 0)
1311 return;
1312
1313 ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1314
1315 while (!dnode_slots_tryenter(children, idx, slots)) {
1316 DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
1317 kpreempt(KPREEMPT_SYNC);
1318 }
1319
1320 dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
1321 dnode_slots_rele(children, idx, slots);
1322 }
1323
1324 void
1325 dnode_special_close(dnode_handle_t *dnh)
1326 {
1327 dnode_t *dn = dnh->dnh_dnode;
1328
1329 /*
1330 * Ensure dnode_rele_and_unlock() has released dn_mtx, after final
1331 * zfs_refcount_remove()
1332 */
1333 mutex_enter(&dn->dn_mtx);
1334 if (zfs_refcount_count(&dn->dn_holds) > 0)
1335 cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
1336 mutex_exit(&dn->dn_mtx);
1337 ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
1338
1339 ASSERT(dn->dn_dbuf == NULL ||
1340 dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
1341 zrl_add(&dnh->dnh_zrlock);
1342 dnode_destroy(dn); /* implicit zrl_remove() */
1343 zrl_destroy(&dnh->dnh_zrlock);
1344 dnh->dnh_dnode = NULL;
1345 }
1346
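/*
 * Create and open an in-core dnode for one of the objset's special dnodes
 * (e.g. the meta dnode), which have no parent dbuf.
 */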
1347 void
1348 dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
1349 dnode_handle_t *dnh)
1350 {
1351 dnode_t *dn;
1352
1353 zrl_init(&dnh->dnh_zrlock);
1354 VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
1355
1356 dn = dnode_create(os, dnp, NULL, object, dnh);
1357 DNODE_VERIFY(dn);
1358
1359 zrl_exit(&dnh->dnh_zrlock);
1360 }
1361
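/*
 * Async eviction callback for a dnode block's dbuf: destroy all child
 * dnodes that are still instantiated and free the handle array.
 */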
1362 static void
1363 dnode_buf_evict_async(void *dbu)
1364 {
1365 dnode_children_t *dnc = dbu;
1366
1367 DNODE_STAT_BUMP(dnode_buf_evict);
1368
1369 for (int i = 0; i < dnc->dnc_count; i++) {
1370 dnode_handle_t *dnh = &dnc->dnc_children[i];
1371 dnode_t *dn;
1372
1373 /*
1374 * The dnode handle lock guards against the dnode moving to
1375 * another valid address, so there is no need here to guard
1376 * against changes to or from NULL.
1377 */
1378 if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1379 zrl_destroy(&dnh->dnh_zrlock);
1380 dnh->dnh_dnode = DN_SLOT_UNINIT;
1381 continue;
1382 }
1383
1384 zrl_add(&dnh->dnh_zrlock);
1385 dn = dnh->dnh_dnode;
1386 /*
1387 * If there are holds on this dnode, then there should
1388 * be holds on the dnode's containing dbuf as well; thus
1389 * it wouldn't be eligible for eviction and this function
1390 * would not have been called.
1391 */
1392 ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
1393 ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
1394
1395 dnode_destroy(dn); /* implicit zrl_remove() for first slot */
1396 zrl_destroy(&dnh->dnh_zrlock);
1397 dnh->dnh_dnode = DN_SLOT_UNINIT;
1398 }
1399 kmem_free(dnc, sizeof (dnode_children_t) +
1400 dnc->dnc_count * sizeof (dnode_handle_t));
1401 }
1402
1403 /*
1404 * When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
1405 * to ensure the hole at the specified object offset is large enough to
1406 * hold the dnode being created. The slots parameter is also used to ensure
1407 * a dnode does not span multiple dnode blocks. In both of these cases, if
1408 * a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
1409 * are only possible when using DNODE_MUST_BE_FREE.
1410 *
1411 * If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
1412 * dnode_hold_impl() will check if the requested dnode is already consumed
1413 * as an extra dnode slot by a large dnode, in which case it returns
1414 * ENOENT.
1415 *
1416 * If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
1417 * return whether the hold would succeed or not. tag and dnp should be
1418 * set to NULL in this case.
1419 *
1420 * errors:
1421 * EINVAL - Invalid object number or flags.
1422 * ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
1423 * EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
1424 * - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
1425 * - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
1426 * ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
1427 * - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
1428 * EIO - I/O error when reading the meta dnode dbuf.
1429 *
1430 * With DNODE_MUST_BE_FREE, the hold succeeds even for free dnodes.
1431 */
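/*
 * Illustrative usage (this is essentially what dnode_hold() below does for
 * the DNODE_MUST_BE_ALLOCATED case):
 *
 *	dnode_t *dn;
 *	int err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
 *	    FTAG, &dn);
 *	if (err == 0) {
 *		... use dn ...
 *		dnode_rele(dn, FTAG);
 *	}
 */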
1432 int
1433 dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
1434 const void *tag, dnode_t **dnp)
1435 {
1436 int epb, idx, err;
1437 int drop_struct_lock = FALSE;
1438 int type;
1439 uint64_t blk;
1440 dnode_t *mdn, *dn;
1441 dmu_buf_impl_t *db;
1442 dnode_children_t *dnc;
1443 dnode_phys_t *dn_block;
1444 dnode_handle_t *dnh;
1445
1446 ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
1447 ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
1448 IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
1449
1450 /*
1451 * If you are holding the spa config lock as writer, you shouldn't
1452 * be asking the DMU to do *anything* unless it's the root pool
1453 * which may require us to read from the root filesystem while
1454 * holding some (not all) of the locks as writer.
1455 */
1456 ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
1457 (spa_is_root(os->os_spa) &&
1458 spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
1459
1460 ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
1461
1462 if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
1463 object == DMU_PROJECTUSED_OBJECT) {
1464 if (object == DMU_USERUSED_OBJECT)
1465 dn = DMU_USERUSED_DNODE(os);
1466 else if (object == DMU_GROUPUSED_OBJECT)
1467 dn = DMU_GROUPUSED_DNODE(os);
1468 else
1469 dn = DMU_PROJECTUSED_DNODE(os);
1470 if (dn == NULL)
1471 return (SET_ERROR(ENOENT));
1472 type = dn->dn_type;
1473 if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
1474 return (SET_ERROR(ENOENT));
1475 if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
1476 return (SET_ERROR(EEXIST));
1477 DNODE_VERIFY(dn);
1478 /* Don't actually hold if dry run, just return 0 */
1479 if (!(flag & DNODE_DRY_RUN)) {
1480 (void) zfs_refcount_add(&dn->dn_holds, tag);
1481 *dnp = dn;
1482 }
1483 return (0);
1484 }
1485
1486 if (object == 0 || object >= DN_MAX_OBJECT)
1487 return (SET_ERROR(EINVAL));
1488
1489 mdn = DMU_META_DNODE(os);
1490 ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
1491
1492 DNODE_VERIFY(mdn);
1493
1494 if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
1495 rw_enter(&mdn->dn_struct_rwlock, RW_READER);
1496 drop_struct_lock = TRUE;
1497 }
1498
1499 blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
1500 db = dbuf_hold(mdn, blk, FTAG);
1501 if (drop_struct_lock)
1502 rw_exit(&mdn->dn_struct_rwlock);
1503 if (db == NULL) {
1504 DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
1505 return (SET_ERROR(EIO));
1506 }
1507
1508 /*
1509 * We do not need to decrypt to read the dnode so it doesn't matter
1510 * if we get the encrypted or decrypted version.
1511 */
1512 err = dbuf_read(db, NULL, DB_RF_CANFAIL |
1513 DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
1514 if (err) {
1515 DNODE_STAT_BUMP(dnode_hold_dbuf_read);
1516 dbuf_rele(db, FTAG);
1517 return (err);
1518 }
1519
1520 ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
1521 epb = db->db.db_size >> DNODE_SHIFT;
1522
1523 idx = object & (epb - 1);
1524 dn_block = (dnode_phys_t *)db->db.db_data;
1525
1526 ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
1527 dnc = dmu_buf_get_user(&db->db);
1528 dnh = NULL;
1529 if (dnc == NULL) {
1530 dnode_children_t *winner;
1531 int skip = 0;
1532
1533 dnc = kmem_zalloc(sizeof (dnode_children_t) +
1534 epb * sizeof (dnode_handle_t), KM_SLEEP);
1535 dnc->dnc_count = epb;
1536 dnh = &dnc->dnc_children[0];
1537
1538 /* Initialize dnode slot status from dnode_phys_t */
1539 for (int i = 0; i < epb; i++) {
1540 zrl_init(&dnh[i].dnh_zrlock);
1541
1542 if (skip) {
1543 skip--;
1544 continue;
1545 }
1546
1547 if (dn_block[i].dn_type != DMU_OT_NONE) {
1548 int interior = dn_block[i].dn_extra_slots;
1549
1550 dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
1551 dnode_set_slots(dnc, i + 1, interior,
1552 DN_SLOT_INTERIOR);
1553 skip = interior;
1554 } else {
1555 dnh[i].dnh_dnode = DN_SLOT_FREE;
1556 skip = 0;
1557 }
1558 }
1559
1560 dmu_buf_init_user(&dnc->dnc_dbu, NULL,
1561 dnode_buf_evict_async, NULL);
1562 winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
1563 if (winner != NULL) {
1564
1565 for (int i = 0; i < epb; i++)
1566 zrl_destroy(&dnh[i].dnh_zrlock);
1567
1568 kmem_free(dnc, sizeof (dnode_children_t) +
1569 epb * sizeof (dnode_handle_t));
1570 dnc = winner;
1571 }
1572 }
1573
1574 ASSERT(dnc->dnc_count == epb);
1575
1576 if (flag & DNODE_MUST_BE_ALLOCATED) {
1577 slots = 1;
1578
1579 dnode_slots_hold(dnc, idx, slots);
1580 dnh = &dnc->dnc_children[idx];
1581
1582 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1583 dn = dnh->dnh_dnode;
1584 } else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
1585 DNODE_STAT_BUMP(dnode_hold_alloc_interior);
1586 dnode_slots_rele(dnc, idx, slots);
1587 dbuf_rele(db, FTAG);
1588 return (SET_ERROR(EEXIST));
1589 } else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
1590 DNODE_STAT_BUMP(dnode_hold_alloc_misses);
1591 dnode_slots_rele(dnc, idx, slots);
1592 dbuf_rele(db, FTAG);
1593 return (SET_ERROR(ENOENT));
1594 } else {
1595 dnode_slots_rele(dnc, idx, slots);
1596 while (!dnode_slots_tryenter(dnc, idx, slots)) {
1597 DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
1598 kpreempt(KPREEMPT_SYNC);
1599 }
1600
1601 /*
1602 * Someone else won the race and called dnode_create()
1603 * after we checked DN_SLOT_IS_PTR() above but before
1604 * we acquired the lock.
1605 */
1606 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1607 DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
1608 dn = dnh->dnh_dnode;
1609 } else {
1610 dn = dnode_create(os, dn_block + idx, db,
1611 object, dnh);
1612 dmu_buf_add_user_size(&db->db,
1613 sizeof (dnode_t));
1614 }
1615 }
1616
1617 mutex_enter(&dn->dn_mtx);
1618 if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
1619 DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
1620 mutex_exit(&dn->dn_mtx);
1621 dnode_slots_rele(dnc, idx, slots);
1622 dbuf_rele(db, FTAG);
1623 return (SET_ERROR(ENOENT));
1624 }
1625
1626 /* Don't actually hold if dry run, just return 0 */
1627 if (flag & DNODE_DRY_RUN) {
1628 mutex_exit(&dn->dn_mtx);
1629 dnode_slots_rele(dnc, idx, slots);
1630 dbuf_rele(db, FTAG);
1631 return (0);
1632 }
1633
1634 DNODE_STAT_BUMP(dnode_hold_alloc_hits);
1635 } else if (flag & DNODE_MUST_BE_FREE) {
1636
1637 if (idx + slots - 1 >= DNODES_PER_BLOCK) {
1638 DNODE_STAT_BUMP(dnode_hold_free_overflow);
1639 dbuf_rele(db, FTAG);
1640 return (SET_ERROR(ENOSPC));
1641 }
1642
1643 dnode_slots_hold(dnc, idx, slots);
1644
1645 if (!dnode_check_slots_free(dnc, idx, slots)) {
1646 DNODE_STAT_BUMP(dnode_hold_free_misses);
1647 dnode_slots_rele(dnc, idx, slots);
1648 dbuf_rele(db, FTAG);
1649 return (SET_ERROR(ENOSPC));
1650 }
1651
1652 dnode_slots_rele(dnc, idx, slots);
1653 while (!dnode_slots_tryenter(dnc, idx, slots)) {
1654 DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
1655 kpreempt(KPREEMPT_SYNC);
1656 }
1657
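/*
 * Re-verify that the slots are still free now that we hold the slot
 * locks; another thread may have claimed or allocated one of them
 * between the unlocked check above and acquiring the locks.
 */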
1658 if (!dnode_check_slots_free(dnc, idx, slots)) {
1659 DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
1660 dnode_slots_rele(dnc, idx, slots);
1661 dbuf_rele(db, FTAG);
1662 return (SET_ERROR(ENOSPC));
1663 }
1664
1665 /*
1666 * Allocated but otherwise free dnodes which would
1667 * be in the interior of a multi-slot dnode need
1668 * to be freed. Single slot dnodes can be safely
1669 * re-purposed as a performance optimization.
1670 */
1671 if (slots > 1) {
1672 uint_t reclaimed =
1673 dnode_reclaim_slots(dnc, idx + 1, slots - 1);
1674 if (reclaimed > 0)
1675 dmu_buf_sub_user_size(&db->db,
1676 reclaimed * sizeof (dnode_t));
1677 }
1678
1679 dnh = &dnc->dnc_children[idx];
1680 if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1681 dn = dnh->dnh_dnode;
1682 } else {
1683 dn = dnode_create(os, dn_block + idx, db,
1684 object, dnh);
1685 dmu_buf_add_user_size(&db->db, sizeof (dnode_t));
1686 }
1687
1688 mutex_enter(&dn->dn_mtx);
1689 if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
1690 DNODE_STAT_BUMP(dnode_hold_free_refcount);
1691 mutex_exit(&dn->dn_mtx);
1692 dnode_slots_rele(dnc, idx, slots);
1693 dbuf_rele(db, FTAG);
1694 return (SET_ERROR(EEXIST));
1695 }
1696
1697 /* Don't actually hold if dry run, just return 0 */
1698 if (flag & DNODE_DRY_RUN) {
1699 mutex_exit(&dn->dn_mtx);
1700 dnode_slots_rele(dnc, idx, slots);
1701 dbuf_rele(db, FTAG);
1702 return (0);
1703 }
1704
1705 dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
1706 DNODE_STAT_BUMP(dnode_hold_free_hits);
1707 } else {
1708 dbuf_rele(db, FTAG);
1709 return (SET_ERROR(EINVAL));
1710 }
1711
1712 ASSERT0(dn->dn_free_txg);
1713
1714 if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
1715 dbuf_add_ref(db, dnh);
1716
1717 mutex_exit(&dn->dn_mtx);
1718
1719 /* Now we can rely on the hold to prevent the dnode from moving. */
1720 dnode_slots_rele(dnc, idx, slots);
1721
1722 DNODE_VERIFY(dn);
1723 ASSERT3P(dnp, !=, NULL);
1724 ASSERT3P(dn->dn_dbuf, ==, db);
1725 ASSERT3U(dn->dn_object, ==, object);
1726 dbuf_rele(db, FTAG);
1727
1728 *dnp = dn;
1729 return (0);
1730 }
1731
1732 /*
1733 * Return held dnode if the object is allocated, NULL if not.
1734 */
1735 int
1736 dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
1737 {
1738 return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
1739 dnp));
1740 }
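
/*
 * Illustrative sketch (not part of the original source): the typical
 * dnode_hold()/dnode_rele() caller pattern.  The helper name and the work
 * done while the hold is outstanding are hypothetical.
 */
static int __maybe_unused
dnode_hold_example(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	int err;

	/* Take a hold; dn is only valid while the hold is outstanding. */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	/* ... operate on dn here ... */

	/* Drop the hold; dn must not be used after this point. */
	dnode_rele(dn, FTAG);
	return (0);
}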
1741
1742 /*
1743 * Can only add a reference if there is already at least one
1744 * reference on the dnode. Returns FALSE if unable to add a
1745 * new reference.
1746 */
1747 boolean_t
1748 dnode_add_ref(dnode_t *dn, const void *tag)
1749 {
1750 mutex_enter(&dn->dn_mtx);
1751 if (zfs_refcount_is_zero(&dn->dn_holds)) {
1752 mutex_exit(&dn->dn_mtx);
1753 return (FALSE);
1754 }
1755 VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
1756 mutex_exit(&dn->dn_mtx);
1757 return (TRUE);
1758 }
1759
1760 void
1761 dnode_rele(dnode_t *dn, const void *tag)
1762 {
1763 mutex_enter(&dn->dn_mtx);
1764 dnode_rele_and_unlock(dn, tag, B_FALSE);
1765 }
1766
1767 void
1768 dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting)
1769 {
1770 uint64_t refs;
1771 /* Get while the hold prevents the dnode from moving. */
1772 dmu_buf_impl_t *db = dn->dn_dbuf;
1773 dnode_handle_t *dnh = dn->dn_handle;
1774
1775 refs = zfs_refcount_remove(&dn->dn_holds, tag);
1776 if (refs == 0)
1777 cv_broadcast(&dn->dn_nodnholds);
1778 mutex_exit(&dn->dn_mtx);
1779 /* dnode could get destroyed at this point, so don't use it anymore */
1780
1781 /*
1782 * It's unsafe to release the last hold on a dnode by dnode_rele() or
1783 * indirectly by dbuf_rele() while relying on the dnode handle to
1784 * prevent the dnode from moving, since releasing the last hold could
1785 * result in the dnode's parent dbuf evicting its dnode handles. For
1786 * that reason anyone calling dnode_rele() or dbuf_rele() without some
1787 * other direct or indirect hold on the dnode must first drop the dnode
1788 * handle.
1789 */
1790 #ifdef ZFS_DEBUG
1791 ASSERT(refs > 0 || zrl_owner(&dnh->dnh_zrlock) != curthread);
1792 #endif
1793
1794 /* NOTE: the DNODE_DNODE does not have a dn_dbuf */
1795 if (refs == 0 && db != NULL) {
1796 /*
1797 * Another thread could add a hold to the dnode handle in
1798 * dnode_hold_impl() while holding the parent dbuf. Since the
1799 * hold on the parent dbuf prevents the handle from being
1800 * destroyed, the hold on the handle is OK. We can't yet assert
1801 * that the handle has zero references, but that will be
1802 * asserted anyway when the handle gets destroyed.
1803 */
1804 mutex_enter(&db->db_mtx);
1805 dbuf_rele_and_unlock(db, dnh, evicting);
1806 }
1807 }
1808
1809 /*
1810 * Test whether we can create a dnode at the specified location.
1811 */
1812 int
1813 dnode_try_claim(objset_t *os, uint64_t object, int slots)
1814 {
1815 return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
1816 slots, NULL, NULL));
1817 }
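
/*
 * Illustrative (hypothetical) use: before allocating a large dnode at a
 * specific object number, a caller can probe whether the required slot
 * range is free without taking a hold, e.g.
 *
 *	if (dnode_try_claim(os, object, slots) == 0)
 *		... the slot range appears free ...
 */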
1818
1819 /*
1820 * Checks if the dnode itself is dirty, or is carrying any uncommitted records.
1821 * It is important to check both conditions, as some operations (e.g. appending
1822 * to a file) can dirty both as a single logical unit, but they are not synced
1823 * out atomically, so checking one and not the other can result in an object
1824 * appearing to be clean mid-way through a commit.
1825 *
1826 * Do not change this lightly! If you get it wrong, dmu_offset_next() can
1827 * detect a hole where there is really data, leading to silent corruption.
1828 */
1829 boolean_t
1830 dnode_is_dirty(dnode_t *dn)
1831 {
1832 mutex_enter(&dn->dn_mtx);
1833
1834 for (int i = 0; i < TXG_SIZE; i++) {
1835 if (multilist_link_active(&dn->dn_dirty_link[i]) ||
1836 !list_is_empty(&dn->dn_dirty_records[i])) {
1837 mutex_exit(&dn->dn_mtx);
1838 return (B_TRUE);
1839 }
1840 }
1841
1842 mutex_exit(&dn->dn_mtx);
1843
1844 return (B_FALSE);
1845 }
1846
1847 void
1848 dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
1849 {
1850 objset_t *os = dn->dn_objset;
1851 uint64_t txg = tx->tx_txg;
1852
1853 if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
1854 dsl_dataset_dirty(os->os_dsl_dataset, tx);
1855 return;
1856 }
1857
1858 DNODE_VERIFY(dn);
1859
1860 #ifdef ZFS_DEBUG
1861 mutex_enter(&dn->dn_mtx);
1862 ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
1863 ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
1864 mutex_exit(&dn->dn_mtx);
1865 #endif
1866
1867 /*
1868 * Determine old uid/gid when necessary
1869 */
1870 dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
1871
1872 multilist_t *dirtylist = &os->os_dirty_dnodes[txg & TXG_MASK];
1873 multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
1874
1875 /*
1876 * If we are already marked dirty, we're done.
1877 */
1878 if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
1879 multilist_sublist_unlock(mls);
1880 return;
1881 }
1882
1883 ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
1884 !avl_is_empty(&dn->dn_dbufs));
1885 ASSERT(dn->dn_datablksz != 0);
1886 ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
1887 ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
1888 ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
1889
1890 dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
1891 (u_longlong_t)dn->dn_object, (u_longlong_t)txg);
1892
1893 multilist_sublist_insert_head(mls, dn);
1894
1895 multilist_sublist_unlock(mls);
1896
1897 /*
1898 * The dnode maintains a hold on its containing dbuf as
1899 * long as there are holds on it. Each instantiated child
1900 * dbuf maintains a hold on the dnode. When the last child
1901 * drops its hold, the dnode will drop its hold on the
1902 * containing dbuf. We add a "dirty hold" here so that the
1903 * dnode will hang around after we finish processing its
1904 * children.
1905 */
1906 VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
1907
1908 (void) dbuf_dirty(dn->dn_dbuf, tx);
1909
1910 dsl_dataset_dirty(os->os_dsl_dataset, tx);
1911 }
1912
1913 void
1914 dnode_free(dnode_t *dn, dmu_tx_t *tx)
1915 {
1916 mutex_enter(&dn->dn_mtx);
1917 if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1918 mutex_exit(&dn->dn_mtx);
1919 return;
1920 }
1921 dn->dn_free_txg = tx->tx_txg;
1922 mutex_exit(&dn->dn_mtx);
1923
1924 dnode_setdirty(dn, tx);
1925 }
1926
1927 /*
1928 * Try to change the block size for the indicated dnode. This can only
1929 * succeed if there are no blocks allocated or dirty beyond the first block.
1930 */
1931 int
1932 dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
1933 {
1934 dmu_buf_impl_t *db;
1935 int err;
1936
1937 ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
1938 if (size == 0)
1939 size = SPA_MINBLOCKSIZE;
1940 else
1941 size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
1942
1943 if (ibs == dn->dn_indblkshift)
1944 ibs = 0;
1945
1946 if (size == dn->dn_datablksz && ibs == 0)
1947 return (0);
1948
1949 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1950
1951 /* Check for any allocated blocks beyond the first */
1952 if (dn->dn_maxblkid != 0)
1953 goto fail;
1954
1955 mutex_enter(&dn->dn_dbufs_mtx);
1956 for (db = avl_first(&dn->dn_dbufs); db != NULL;
1957 db = AVL_NEXT(&dn->dn_dbufs, db)) {
1958 if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
1959 db->db_blkid != DMU_SPILL_BLKID) {
1960 mutex_exit(&dn->dn_dbufs_mtx);
1961 goto fail;
1962 }
1963 }
1964 mutex_exit(&dn->dn_dbufs_mtx);
1965
1966 if (ibs && dn->dn_nlevels != 1)
1967 goto fail;
1968
1969 dnode_setdirty(dn, tx);
1970 if (size != dn->dn_datablksz) {
1971 /* resize the old block */
1972 err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
1973 if (err == 0) {
1974 dbuf_new_size(db, size, tx);
1975 } else if (err != ENOENT) {
1976 goto fail;
1977 }
1978
1979 dnode_setdblksz(dn, size);
1980 dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
1981 if (db)
1982 dbuf_rele(db, FTAG);
1983 }
1984 if (ibs) {
1985 dn->dn_indblkshift = ibs;
1986 dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
1987 }
1988
1989 rw_exit(&dn->dn_struct_rwlock);
1990 return (0);
1991
1992 fail:
1993 rw_exit(&dn->dn_struct_rwlock);
1994 return (SET_ERROR(ENOTSUP));
1995 }
1996
1997 static void
1998 dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
1999 {
2000 uint64_t txgoff = tx->tx_txg & TXG_MASK;
2001 int old_nlevels = dn->dn_nlevels;
2002 dmu_buf_impl_t *db;
2003 list_t *list;
2004 dbuf_dirty_record_t *new, *dr, *dr_next;
2005
2006 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2007
2008 ASSERT3U(new_nlevels, >, dn->dn_nlevels);
2009 dn->dn_nlevels = new_nlevels;
2010
2011 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
2012 dn->dn_next_nlevels[txgoff] = new_nlevels;
2013
2014 /* dirty the left indirects */
2015 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
2016 ASSERT(db != NULL);
2017 new = dbuf_dirty(db, tx);
2018 dbuf_rele(db, FTAG);
2019
2020 /* transfer the dirty records to the new indirect */
2021 mutex_enter(&dn->dn_mtx);
2022 mutex_enter(&new->dt.di.dr_mtx);
2023 list = &dn->dn_dirty_records[txgoff];
2024 for (dr = list_head(list); dr; dr = dr_next) {
2025 dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
2026
2027 IMPLY(dr->dr_dbuf == NULL, old_nlevels == 1);
2028 if (dr->dr_dbuf == NULL ||
2029 (dr->dr_dbuf->db_level == old_nlevels - 1 &&
2030 dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
2031 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID)) {
2032 list_remove(&dn->dn_dirty_records[txgoff], dr);
2033 list_insert_tail(&new->dt.di.dr_children, dr);
2034 dr->dr_parent = new;
2035 }
2036 }
2037 mutex_exit(&new->dt.di.dr_mtx);
2038 mutex_exit(&dn->dn_mtx);
2039 }
2040
2041 int
2042 dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
2043 {
2044 int ret = 0;
2045
2046 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2047
2048 if (dn->dn_nlevels == nlevels) {
2049 ret = 0;
2050 goto out;
2051 } else if (nlevels < dn->dn_nlevels) {
2052 ret = SET_ERROR(EINVAL);
2053 goto out;
2054 }
2055
2056 dnode_set_nlevels_impl(dn, nlevels, tx);
2057
2058 out:
2059 rw_exit(&dn->dn_struct_rwlock);
2060 return (ret);
2061 }
2062
2063 /* read-holding callers must not rely on the lock being continuously held */
2064 void
2065 dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
2066 boolean_t force)
2067 {
2068 int epbs, new_nlevels;
2069 uint64_t sz;
2070
2071 ASSERT(blkid != DMU_BONUS_BLKID);
2072
2073 ASSERT(have_read ?
2074 RW_READ_HELD(&dn->dn_struct_rwlock) :
2075 RW_WRITE_HELD(&dn->dn_struct_rwlock));
2076
2077 /*
2078 * if we have a read-lock, check to see if we need to do any work
2079 * before upgrading to a write-lock.
2080 */
2081 if (have_read) {
2082 if (blkid <= dn->dn_maxblkid)
2083 return;
2084
2085 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
2086 rw_exit(&dn->dn_struct_rwlock);
2087 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2088 }
2089 }
2090
2091 /*
2092 * Raw sends (indicated by the force flag) require that we take the
2093 * given blkid even if the value is lower than the current value.
2094 */
2095 if (!force && blkid <= dn->dn_maxblkid)
2096 goto out;
2097
2098 /*
2099 * We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
2100 * to indicate that this field is set. This allows us to set the
2101 * maxblkid to 0 on an existing object in dnode_sync().
2102 */
2103 dn->dn_maxblkid = blkid;
2104 dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
2105 blkid | DMU_NEXT_MAXBLKID_SET;
2106
2107 /*
2108 * Compute the number of levels necessary to support the new maxblkid.
2109 * Raw sends will ensure nlevels is set correctly for us.
2110 */
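/*
 * For example (illustrative): with dn_nblkptr = 3 and epbs = 10 (128K
 * indirect blocks), one level covers blkids 0-2, two levels cover blkids
 * up to 3 * 1024 - 1, and each additional level widens coverage by
 * another factor of 2^epbs.
 */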
2111 new_nlevels = 1;
2112 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2113 for (sz = dn->dn_nblkptr;
2114 sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
2115 new_nlevels++;
2116
2117 ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
2118
2119 if (!force) {
2120 if (new_nlevels > dn->dn_nlevels)
2121 dnode_set_nlevels_impl(dn, new_nlevels, tx);
2122 } else {
2123 ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
2124 }
2125
2126 out:
2127 if (have_read)
2128 rw_downgrade(&dn->dn_struct_rwlock);
2129 }
2130
2131 static void
2132 dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
2133 {
2134 dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
2135 if (db != NULL) {
2136 dmu_buf_will_dirty(&db->db, tx);
2137 dbuf_rele(db, FTAG);
2138 }
2139 }
2140
2141 /*
2142 * Dirty all the in-core level-1 dbufs in the range specified by start_blkid
2143 * and end_blkid.
2144 */
2145 static void
2146 dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
2147 dmu_tx_t *tx)
2148 {
2149 dmu_buf_impl_t *db_search;
2150 dmu_buf_impl_t *db;
2151 avl_index_t where;
2152
2153 db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
2154
2155 mutex_enter(&dn->dn_dbufs_mtx);
2156
2157 db_search->db_level = 1;
2158 db_search->db_blkid = start_blkid + 1;
2159 db_search->db_state = DB_SEARCH;
2160 for (;;) {
2161
2162 db = avl_find(&dn->dn_dbufs, db_search, &where);
2163 if (db == NULL)
2164 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
2165
2166 if (db == NULL || db->db_level != 1 ||
2167 db->db_blkid >= end_blkid) {
2168 break;
2169 }
2170
2171 /*
2172 * Set up the next blkid we want to search for.
2173 */
2174 db_search->db_blkid = db->db_blkid + 1;
2175 ASSERT3U(db->db_blkid, >=, start_blkid);
2176
2177 /*
2178 * If the dbuf transitions to DB_EVICTING while we're trying
2179 * to dirty it, then we will be unable to discover it in
2180 * the dbuf hash table. This will result in a call to
2181 * dbuf_create() which needs to acquire the dn_dbufs_mtx
2182 * lock. To avoid a deadlock, we drop the lock before
2183 * dirtying the level-1 dbuf.
2184 */
2185 mutex_exit(&dn->dn_dbufs_mtx);
2186 dnode_dirty_l1(dn, db->db_blkid, tx);
2187 mutex_enter(&dn->dn_dbufs_mtx);
2188 }
2189
2190 #ifdef ZFS_DEBUG
2191 /*
2192 * Walk all the in-core level-1 dbufs and verify they have been dirtied.
2193 */
2194 db_search->db_level = 1;
2195 db_search->db_blkid = start_blkid + 1;
2196 db_search->db_state = DB_SEARCH;
2197 db = avl_find(&dn->dn_dbufs, db_search, &where);
2198 if (db == NULL)
2199 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
2200 for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
2201 if (db->db_level != 1 || db->db_blkid >= end_blkid)
2202 break;
2203 if (db->db_state != DB_EVICTING)
2204 ASSERT(db->db_dirtycnt > 0);
2205 }
2206 #endif
2207 kmem_free(db_search, sizeof (dmu_buf_impl_t));
2208 mutex_exit(&dn->dn_dbufs_mtx);
2209 }
2210
2211 void
2212 dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
2213 {
2214 /*
2215 * Don't set dirtyctx to SYNC if we're just modifying this as we
2216 * initialize the objset.
2217 */
2218 if (dn->dn_dirtyctx == DN_UNDIRTIED) {
2219 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
2220
2221 if (ds != NULL) {
2222 rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
2223 }
2224 if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
2225 if (dmu_tx_is_syncing(tx))
2226 dn->dn_dirtyctx = DN_DIRTY_SYNC;
2227 else
2228 dn->dn_dirtyctx = DN_DIRTY_OPEN;
2229 dn->dn_dirtyctx_firstset = tag;
2230 }
2231 if (ds != NULL) {
2232 rrw_exit(&ds->ds_bp_rwlock, tag);
2233 }
2234 }
2235 }
2236
2237 static void
2238 dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
2239 dmu_tx_t *tx)
2240 {
2241 dmu_buf_impl_t *db;
2242 int res;
2243
2244 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2245 res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off), TRUE, FALSE,
2246 FTAG, &db);
2247 rw_exit(&dn->dn_struct_rwlock);
2248 if (res == 0) {
2249 db_lock_type_t dblt;
2250 boolean_t dirty;
2251
2252 dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2253 /* don't dirty if not on disk and not dirty */
2254 dirty = !list_is_empty(&db->db_dirty_records) ||
2255 (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
2256 dmu_buf_unlock_parent(db, dblt, FTAG);
2257 if (dirty) {
2258 caddr_t data;
2259
2260 dmu_buf_will_dirty(&db->db, tx);
2261 data = db->db.db_data;
2262 memset(data + blkoff, 0, len);
2263 }
2264 dbuf_rele(db, FTAG);
2265 }
2266 }
2267
2268 void
2269 dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
2270 {
2271 uint64_t blkoff, blkid, nblks;
2272 int blksz, blkshift, head, tail;
2273 int trunc = FALSE;
2274 int epbs;
2275
2276 blksz = dn->dn_datablksz;
2277 blkshift = dn->dn_datablkshift;
2278 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2279
2280 if (len == DMU_OBJECT_END) {
2281 len = UINT64_MAX - off;
2282 trunc = TRUE;
2283 }
2284
2285 /*
2286 * First, block align the region to free:
2287 */
2288 if (ISP2(blksz)) {
2289 head = P2NPHASE(off, blksz);
2290 blkoff = P2PHASE(off, blksz);
2291 if ((off >> blkshift) > dn->dn_maxblkid)
2292 return;
2293 } else {
2294 ASSERT(dn->dn_maxblkid == 0);
2295 if (off == 0 && len >= blksz) {
2296 /*
2297 * Freeing the whole block; fast-track this request.
2298 */
2299 blkid = 0;
2300 nblks = 1;
2301 if (dn->dn_nlevels > 1) {
2302 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2303 dnode_dirty_l1(dn, 0, tx);
2304 rw_exit(&dn->dn_struct_rwlock);
2305 }
2306 goto done;
2307 } else if (off >= blksz) {
2308 /* Freeing past end-of-data */
2309 return;
2310 } else {
2311 /* Freeing part of the block. */
2312 head = blksz - off;
2313 ASSERT3U(head, >, 0);
2314 }
2315 blkoff = off;
2316 }
2317 /* zero out any partial block data at the start of the range */
2318 if (head) {
2319 ASSERT3U(blkoff + head, ==, blksz);
2320 if (len < head)
2321 head = len;
2322 dnode_partial_zero(dn, off, blkoff, head, tx);
2323 off += head;
2324 len -= head;
2325 }
2326
2327 /* If the range was less than one block, we're done */
2328 if (len == 0)
2329 return;
2330
2331 /* If the remaining range is past end of file, we're done */
2332 if ((off >> blkshift) > dn->dn_maxblkid)
2333 return;
2334
2335 ASSERT(ISP2(blksz));
2336 if (trunc)
2337 tail = 0;
2338 else
2339 tail = P2PHASE(len, blksz);
2340
2341 ASSERT0(P2PHASE(off, blksz));
2342 /* zero out any partial block data at the end of the range */
2343 if (tail) {
2344 if (len < tail)
2345 tail = len;
2346 dnode_partial_zero(dn, off + len, 0, tail, tx);
2347 len -= tail;
2348 }
2349
2350 /* If the range did not include a full block, we are done */
2351 if (len == 0)
2352 return;
2353
2354 ASSERT(IS_P2ALIGNED(off, blksz));
2355 ASSERT(trunc || IS_P2ALIGNED(len, blksz));
2356 blkid = off >> blkshift;
2357 nblks = len >> blkshift;
2358 if (trunc)
2359 nblks += 1;
2360
2361 /*
2362 * Dirty all the indirect blocks in this range. Note that only
2363 * the first and last indirect blocks can actually be written
2364 * (if they were partially freed) -- they must be dirtied, even if
2365 * they do not exist on disk yet. The interior blocks will
2366 * be freed by free_children(), so they will not actually be written.
2367 * Even though these interior blocks will not be written, we
2368 * dirty them for two reasons:
2369 *
2370 * - It ensures that the indirect blocks remain in memory until
2371 * syncing context. (They have already been prefetched by
2372 * dmu_tx_hold_free(), so we don't have to worry about reading
2373 * them serially here.)
2374 *
2375 * - The dirty space accounting will put pressure on the txg sync
2376 * mechanism to begin syncing, and to delay transactions if there
2377 * is a large amount of freeing. Even though these indirect
2378 * blocks will not be written, we could need to write the same
2379 * amount of space if we copy the freed BPs into deadlists.
2380 */
2381 if (dn->dn_nlevels > 1) {
2382 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2383 uint64_t first, last;
2384
2385 first = blkid >> epbs;
2386 dnode_dirty_l1(dn, first, tx);
2387 if (trunc)
2388 last = dn->dn_maxblkid >> epbs;
2389 else
2390 last = (blkid + nblks - 1) >> epbs;
2391 if (last != first)
2392 dnode_dirty_l1(dn, last, tx);
2393
2394 dnode_dirty_l1range(dn, first, last, tx);
2395
2396 int shift = dn->dn_datablkshift + dn->dn_indblkshift -
2397 SPA_BLKPTRSHIFT;
2398 for (uint64_t i = first + 1; i < last; i++) {
2399 /*
2400 * Set i to the blockid of the next non-hole
2401 * level-1 indirect block at or after i. Note
2402 * that dnode_next_offset() operates in terms of
2403 * level-0-equivalent bytes.
2404 */
2405 uint64_t ibyte = i << shift;
2406 int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
2407 &ibyte, 2, 1, 0);
2408 i = ibyte >> shift;
2409 if (i >= last)
2410 break;
2411
2412 /*
2413 * Normally we should not see an error, either
2414 * from dnode_next_offset() or dbuf_hold_level()
2415 * (except for ESRCH from dnode_next_offset).
2416 * If there is an i/o error, then when we read
2417 * this block in syncing context, it will use
2418 * ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
2419 * to the "failmode" property. dnode_next_offset()
2420 * doesn't have a flag to indicate MUSTSUCCEED.
2421 */
2422 if (err != 0)
2423 break;
2424
2425 dnode_dirty_l1(dn, i, tx);
2426 }
2427 rw_exit(&dn->dn_struct_rwlock);
2428 }
2429
2430 done:
2431 /*
2432 * Add this range to the dnode range list.
2433 * We will finish up this free operation in the syncing phase.
2434 */
2435 mutex_enter(&dn->dn_mtx);
2436 {
2437 int txgoff = tx->tx_txg & TXG_MASK;
2438 if (dn->dn_free_ranges[txgoff] == NULL) {
2439 dn->dn_free_ranges[txgoff] = zfs_range_tree_create(NULL,
2440 ZFS_RANGE_SEG64, NULL, 0, 0);
2441 }
2442 zfs_range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
2443 zfs_range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
2444 }
2445 dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
2446 (u_longlong_t)blkid, (u_longlong_t)nblks,
2447 (u_longlong_t)tx->tx_txg);
2448 mutex_exit(&dn->dn_mtx);
2449
2450 dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
2451 dnode_setdirty(dn, tx);
2452 }
2453
2454 static boolean_t
2455 dnode_spill_freed(dnode_t *dn)
2456 {
2457 int i;
2458
2459 mutex_enter(&dn->dn_mtx);
2460 for (i = 0; i < TXG_SIZE; i++) {
2461 if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
2462 break;
2463 }
2464 mutex_exit(&dn->dn_mtx);
2465 return (i < TXG_SIZE);
2466 }
2467
2468 /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
2469 uint64_t
2470 dnode_block_freed(dnode_t *dn, uint64_t blkid)
2471 {
2472 int i;
2473
2474 if (blkid == DMU_BONUS_BLKID)
2475 return (FALSE);
2476
2477 if (dn->dn_free_txg)
2478 return (TRUE);
2479
2480 if (blkid == DMU_SPILL_BLKID)
2481 return (dnode_spill_freed(dn));
2482
2483 mutex_enter(&dn->dn_mtx);
2484 for (i = 0; i < TXG_SIZE; i++) {
2485 if (dn->dn_free_ranges[i] != NULL &&
2486 zfs_range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
2487 break;
2488 }
2489 mutex_exit(&dn->dn_mtx);
2490 return (i < TXG_SIZE);
2491 }
2492
2493 /* call from syncing context when we actually write/free space for this dnode */
2494 void
2495 dnode_diduse_space(dnode_t *dn, int64_t delta)
2496 {
2497 uint64_t space;
2498 dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
2499 dn, dn->dn_phys,
2500 (u_longlong_t)dn->dn_phys->dn_used,
2501 (longlong_t)delta);
2502
2503 mutex_enter(&dn->dn_mtx);
2504 space = DN_USED_BYTES(dn->dn_phys);
2505 if (delta > 0) {
2506 ASSERT3U(space + delta, >=, space); /* no overflow */
2507 } else {
2508 ASSERT3U(space, >=, -delta); /* no underflow */
2509 }
2510 space += delta;
2511 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
2512 ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
2513 ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
2514 dn->dn_phys->dn_used = space >> DEV_BSHIFT;
2515 } else {
2516 dn->dn_phys->dn_used = space;
2517 dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
2518 }
2519 mutex_exit(&dn->dn_mtx);
2520 }
2521
2522 /*
2523 * Scans a block at the indicated "level" looking for a hole or data,
2524 * depending on 'flags'.
2525 *
2526 * If level > 0, then we are scanning an indirect block looking at its
2527 * pointers. If level == 0, then we are looking at a block of dnodes.
2528 *
2529 * If we don't find what we are looking for in the block, we return ESRCH.
2530 * Otherwise, return with *offset pointing to the beginning (if searching
2531 * forwards) or end (if searching backwards) of the range covered by the
2532 * block pointer we matched on (or dnode).
2533 *
2534 * The basic search algorithm used below by dnode_next_offset() is to
2535 * use this function to search up the block tree (widen the search) until
2536 * we find something (i.e., we don't return ESRCH) and then search back
2537 * down the tree (narrow the search) until we reach our original search
2538 * level.
2539 */
2540 static int
2541 dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
2542 int lvl, uint64_t blkfill, uint64_t txg)
2543 {
2544 dmu_buf_impl_t *db = NULL;
2545 void *data = NULL;
2546 uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2547 uint64_t epb = 1ULL << epbs;
2548 uint64_t minfill, maxfill;
2549 boolean_t hole;
2550 int i, inc, error, span;
2551
2552 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2553
2554 hole = ((flags & DNODE_FIND_HOLE) != 0);
2555 inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
2556 ASSERT(txg == 0 || !hole);
2557
2558 if (lvl == dn->dn_phys->dn_nlevels) {
2559 error = 0;
2560 epb = dn->dn_phys->dn_nblkptr;
2561 data = dn->dn_phys->dn_blkptr;
2562 } else {
2563 uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
2564 error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
2565 if (error) {
2566 if (error != ENOENT)
2567 return (error);
2568 if (hole)
2569 return (0);
2570 /*
2571 * This can only happen when we are searching up
2572 * the block tree for data. We don't really need to
2573 * adjust the offset, as we will just end up looking
2574 * at the pointer to this block in its parent, and it's
2575 * going to be unallocated, so we will skip over it.
2576 */
2577 return (SET_ERROR(ESRCH));
2578 }
2579 error = dbuf_read(db, NULL,
2580 DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
2581 DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
2582 if (error) {
2583 dbuf_rele(db, FTAG);
2584 return (error);
2585 }
2586 data = db->db.db_data;
2587 rw_enter(&db->db_rwlock, RW_READER);
2588 }
2589
2590 if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
2591 BP_GET_LOGICAL_BIRTH(db->db_blkptr) <= txg ||
2592 BP_IS_HOLE(db->db_blkptr))) {
2593 /*
2594 * This can only happen when we are searching up the tree
2595 * and these conditions mean that we need to keep climbing.
2596 */
2597 error = SET_ERROR(ESRCH);
2598 } else if (lvl == 0) {
2599 dnode_phys_t *dnp = data;
2600
2601 ASSERT(dn->dn_type == DMU_OT_DNODE);
2602 ASSERT(!(flags & DNODE_FIND_BACKWARDS));
2603
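/*
 * Scan the dnode block, stepping by dn_extra_slots + 1 so that the
 * interior slots of multi-slot dnodes are skipped, until we find a
 * slot whose allocated/free state matches what we are looking for.
 */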
2604 for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
2605 i < blkfill; i += dnp[i].dn_extra_slots + 1) {
2606 if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
2607 break;
2608 }
2609
2610 if (i == blkfill)
2611 error = SET_ERROR(ESRCH);
2612
2613 *offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
2614 (i << DNODE_SHIFT);
2615 } else {
2616 blkptr_t *bp = data;
2617 uint64_t start = *offset;
2618 span = (lvl - 1) * epbs + dn->dn_datablkshift;
2619 minfill = 0;
2620 maxfill = blkfill << ((lvl - 1) * epbs);
2621
2622 if (hole)
2623 maxfill--;
2624 else
2625 minfill++;
2626
2627 if (span >= 8 * sizeof (*offset)) {
2628 /* This only happens on the highest indirection level */
2629 ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
2630 *offset = 0;
2631 } else {
2632 *offset = *offset >> span;
2633 }
2634
2635 for (i = BF64_GET(*offset, 0, epbs);
2636 i >= 0 && i < epb; i += inc) {
2637 if (BP_GET_FILL(&bp[i]) >= minfill &&
2638 BP_GET_FILL(&bp[i]) <= maxfill &&
2639 (hole || BP_GET_LOGICAL_BIRTH(&bp[i]) > txg))
2640 break;
2641 if (inc > 0 || *offset > 0)
2642 *offset += inc;
2643 }
2644
2645 if (span >= 8 * sizeof (*offset)) {
2646 *offset = start;
2647 } else {
2648 *offset = *offset << span;
2649 }
2650
2651 if (inc < 0) {
2652 /* traversing backwards; position offset at the end */
2653 if (span < 8 * sizeof (*offset))
2654 *offset = MIN(*offset + (1ULL << span) - 1,
2655 start);
2656 } else if (*offset < start) {
2657 *offset = start;
2658 }
2659 if (i < 0 || i >= epb)
2660 error = SET_ERROR(ESRCH);
2661 }
2662
2663 if (db != NULL) {
2664 rw_exit(&db->db_rwlock);
2665 dbuf_rele(db, FTAG);
2666 }
2667
2668 return (error);
2669 }
2670
2671 /*
2672 * Find the next hole, data, or sparse region at or after *offset.
2673 * The value 'blkfill' tells us how many items we expect to find
2674 * in an L0 data block; this value is 1 for normal objects,
2675 * DNODES_PER_BLOCK for the meta dnode, and some fraction of
2676 * DNODES_PER_BLOCK when searching for sparse regions thereof.
2677 *
2678 * Examples:
2679 *
2680 * dnode_next_offset(dn, flags, offset, 1, 1, 0);
2681 * Finds the next/previous hole/data in a file.
2682 * Used in dmu_offset_next().
2683 *
2684 * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
2685 * Finds the next free/allocated dnode in an objset's meta-dnode.
2686 * Only finds objects that have new contents since txg (i.e.
2687 * bonus buffer changes and content removal are ignored).
2688 * Used in dmu_object_next().
2689 *
2690 * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
2691 * Finds the next L2 meta-dnode bp that's at most 1/4 full.
2692 * Used in dmu_object_alloc().
2693 */
2694 int
2695 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
2696 int minlvl, uint64_t blkfill, uint64_t txg)
2697 {
2698 uint64_t initial_offset = *offset;
2699 int lvl, maxlvl;
2700 int error = 0;
2701
2702 if (!(flags & DNODE_FIND_HAVELOCK))
2703 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2704
2705 if (dn->dn_phys->dn_nlevels == 0) {
2706 error = SET_ERROR(ESRCH);
2707 goto out;
2708 }
2709
2710 if (dn->dn_datablkshift == 0) {
2711 if (*offset < dn->dn_datablksz) {
2712 if (flags & DNODE_FIND_HOLE)
2713 *offset = dn->dn_datablksz;
2714 } else {
2715 error = SET_ERROR(ESRCH);
2716 }
2717 goto out;
2718 }
2719
2720 maxlvl = dn->dn_phys->dn_nlevels;
2721
2722 for (lvl = minlvl; lvl <= maxlvl; lvl++) {
2723 error = dnode_next_offset_level(dn,
2724 flags, offset, lvl, blkfill, txg);
2725 if (error != ESRCH)
2726 break;
2727 }
2728
2729 while (error == 0 && --lvl >= minlvl) {
2730 error = dnode_next_offset_level(dn,
2731 flags, offset, lvl, blkfill, txg);
2732 }
2733
2734 /*
2735 * There's always a "virtual hole" at the end of the object, even
2736 * if all BP's which physically exist are non-holes.
2737 */
2738 if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
2739 minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
2740 error = 0;
2741 }
2742
2743 if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
2744 initial_offset < *offset : initial_offset > *offset))
2745 error = SET_ERROR(ESRCH);
2746 out:
2747 if (!(flags & DNODE_FIND_HAVELOCK))
2748 rw_exit(&dn->dn_struct_rwlock);
2749
2750 return (error);
2751 }
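
/*
 * Illustrative (hypothetical) call, mirroring the dmu_offset_next() example
 * above: find the start of the next data region at or after "off" in a
 * held dnode.
 *
 *	uint64_t off = 0;
 *	error = dnode_next_offset(dn, 0, &off, 1, 1, 0);
 *	if (error == 0)
 *		... off is now the start of the next data region ...
 *	else if (error == ESRCH)
 *		... no data at or beyond the original offset ...
 */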
2752
2753 #if defined(_KERNEL)
2754 EXPORT_SYMBOL(dnode_hold);
2755 EXPORT_SYMBOL(dnode_rele);
2756 EXPORT_SYMBOL(dnode_set_nlevels);
2757 EXPORT_SYMBOL(dnode_set_blksz);
2758 EXPORT_SYMBOL(dnode_free_range);
2759 EXPORT_SYMBOL(dnode_evict_dbufs);
2760 EXPORT_SYMBOL(dnode_evict_bonus);
2761 #endif
2762
2763 ZFS_MODULE_PARAM(zfs, zfs_, default_bs, INT, ZMOD_RW,
2764 "Default dnode block shift");
2765 ZFS_MODULE_PARAM(zfs, zfs_, default_ibs, INT, ZMOD_RW,
2766 "Default dnode indirect block shift");
2767