/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

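/*
 * Raise the dnode's indirection to the level recorded in
 * dn_next_nlevels for this txg: hold a dbuf for the new top-level
 * indirect block, move the dnode's block pointers into it, and
 * repoint any cached children at the new parent.
 */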
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/*
	 * Lock ordering requires that we hold the children's db_mutexes (by
	 * calling dbuf_find()) before holding the parent's db_rwlock. The lock
	 * order is imposed by dbuf_read's steps of "grab the lock to protect
	 * db_parent, get db_parent, hold db_parent's db_rwlock".
	 */
	dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
	ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
	for (i = 0; i < nblkptr; i++) {
		children[i] =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);
	}

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	if (dn->dn_dbuf != NULL)
		rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
	rw_enter(&db->db_rwlock, RW_WRITER);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = children[i];

		if (child == NULL)
			continue;
#ifdef DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif /* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	rw_exit(&db->db_rwlock);
	if (dn->dn_dbuf != NULL)
		rw_exit(&dn->dn_dbuf->db_rwlock);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

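/*
 * Free the blocks referenced by the given range of block pointers,
 * charging the reclaimed space back against the dnode.  When the
 * hole_birth feature is active, the zeroed bps retain the logical
 * size, type, level, and a birth txg for the hole being punched, so
 * that zfs send can tell when holes were created.
 */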
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
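/*
 * Debug check, reached via the FREE_VERIFY() macro: every level-0
 * child under this L1 indirect block in the range being freed must
 * already be zeroed, both in its dirty record for this txg and in
 * its cached buffer (unless the buffer was re-dirtied in a later
 * txg).
 */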
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level - 1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

/*
 * We don't usually free the indirect blocks here. If in one txg we have a
 * free_range and a write to the same indirect block, it's important that we
 * preserve the hole's birth times. Therefore, we don't free any indirect
 * blocks in free_children(). If an indirect block happens to turn into all
 * holes, it will be freed by dbuf_write_children_ready(), which happens at a
 * point in the syncing process where we know for certain the contents of the
 * indirect block.
 *
 * However, if we're freeing a dnode, its space accounting must go to zero
 * before we actually try to free the dnode, or we will trip an assertion. In
 * addition, we know the case described above cannot occur, because the dnode
 * is being freed. Therefore, we free the indirect blocks immediately in that
 * case.
 */
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend;
	unsigned int epbs, shift, i;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	/*
	 * If we modify this indirect block, and we are not freeing the
	 * dnode (!free_indirects), then this indirect block needs to get
	 * written to disk by dbuf_write(). If it is dirty, we know it will
	 * be written (otherwise, we would have incorrect on-disk state
	 * because the space would be freed but still referenced by the BP
	 * in this indirect block). Therefore we VERIFY that it is
	 * dirty.
	 *
	 * Our VERIFY covers some cases that do not actually have to be
	 * dirty, but the open-context code happens to dirty. E.g. if the
	 * blocks we are freeing are all holes, because in that case, we
	 * are only freeing part of this indirect block, so it is an
	 * ancestor of the first or last block to be freed. The first and
	 * last L1 indirect blocks are always dirtied by dnode_free_range().
	 */
	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
	VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
	dmu_buf_unlock_parent(db, dblt, FTAG);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	ASSERT3U(epbs, <, 31);
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
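		/* FREE_VERIFY() only expands to free_verify() under ZFS_DEBUG */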
		FREE_VERIFY(db, start, end, tx);
		rw_enter(&db->db_rwlock, RW_WRITER);
		free_blocks(dn, bp, end - start + 1, tx);
		rw_exit(&db->db_rwlock);
	} else {
		for (uint64_t id = start; id <= end; id++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    id, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, free_indirects, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	if (free_indirects) {
		rw_enter(&db->db_rwlock, RW_WRITER);
		for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
			ASSERT(BP_IS_HOLE(bp));
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
		rw_exit(&db->db_rwlock);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    boolean_t free_indirects, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);
			free_children(db, blkid, nblks, free_indirects, tx);
			dbuf_rele(db, FTAG);
		}
	}

	/*
	 * Do not truncate the maxblkid if we are performing a raw
	 * receive. The raw receive sets the maxblkid manually and
	 * must not be overridden. Usually, the last DRR_FREE record
	 * will be at the maxblkid, because the source system sets
	 * the maxblkid when truncating. However, if the last block
	 * was freed by overwriting with zeros and being compressed
	 * away to a hole, the source system will generate a DRR_FREE
	 * record while leaving the maxblkid after the end of that
	 * record. In this case we need to leave the maxblkid as
	 * indicated in the DRR_OBJECT record, so that it matches the
	 * source system, ensuring that the cryptographic hashes will
	 * match.
	 */
	if (trunc && !dn->dn_objset->os_raw_receive) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

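/*
 * Argument bundle for dnode_sync_free_range(), which must fit
 * range_tree_walk()'s single-pointer callback argument.
 */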
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
	boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;

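/*
 * range_tree_walk() callback that frees one range of blocks.  The
 * caller (dnode_sync()) holds dn_mtx, so drop it around the real
 * work and retake it before returning.
 */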
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks,
	    dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif /* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    zfs_refcount_is_zero(&db->db_holds)) {
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			/*
			 * We need to use the "marker" dbuf rather than
			 * simply getting the next dbuf, because
			 * dbuf_destroy() may actually remove multiple dbufs.
			 * It can call itself recursively on the parent dbuf,
			 * which may also be removed from dn_dbufs. The code
			 * flow would look like:
			 *
			 * dbuf_destroy():
			 *   dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
			 *	if (!cacheable || pending_evict)
			 *	    dbuf_destroy()
			 */
			dbuf_destroy(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	dnode_evict_bonus(dn);
}

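/*
 * Evict the dnode's bonus dbuf if it has no holds; otherwise mark it
 * for eviction when the last hold is released.
 */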
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_destroy(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}

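/*
 * Throw away the dirty records on this list (recursing into the
 * children of indirect dbufs) without writing anything to disk.
 * Used by dnode_sync_free(), since a freed dnode's contents never
 * need to reach disk.
 */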
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
	}
}

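/*
 * Sync-context teardown for a dnode being freed: discard its dirty
 * records, evict its dbufs, zero its on-disk dnode_phys_t, and drop
 * the hold that was taken when it was dirtied.
 */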
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;
	dn->dn_next_maxblkid[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
	dnode_free_interior_slots(dn);

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	dn->dn_num_slots = 1;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	/*
	 * Do user accounting if it is enabled and this is not
	 * an encrypted receive.
	 */
	if (dmu_objset_userused_enabled(os) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
	    (!os->os_encrypted || !dmu_objset_is_receiving(os))) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		if (dmu_objset_userobjused_enabled(dn->dn_objset))
			dn->dn_phys->dn_flags |=
			    DNODE_FLAG_USEROBJUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USEROBJUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	dnp->dn_extra_slots = dn->dn_num_slots - 1;

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    !range_tree_is_empty(dn->dn_free_ranges[txgoff]));
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <=
		    DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		dsfra.dsfra_free_indirects = freeing_dnode;
		mutex_enter(&dn->dn_mtx);
		if (freeing_dnode) {
			ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
			    0, dn->dn_maxblkid + 1));
		}
		/*
		 * Because dnode_sync_free_range() must drop dn_mtx during its
		 * processing, using it as a callback to range_tree_vacate() is
		 * not safe. No other operations (besides destroy) are allowed
		 * once range_tree_vacate() has begun, and dropping dn_mtx
		 * would leave a window open for another thread to observe that
		 * invalid (and unsafe) state.
		 */
		range_tree_walk(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dn->dn_objset->os_freed_dnodes++;
		dnode_sync_free(dn, tx);
		return;
	}

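	/*
	 * A multi-slot dnode is in use; note that the large_dnode
	 * feature needs to be activated on this dataset.
	 */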
	if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
		dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_DNODE] =
		    B_TRUE;
		mutex_exit(&ds->ds_lock);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	/*
	 * This must be done after dnode_sync_free_range()
	 * and dnode_increase_indirection(). See dnode_new_blkid()
	 * for an explanation of the high bit being set.
	 */
	if (dn->dn_next_maxblkid[txgoff]) {
		mutex_enter(&dn->dn_mtx);
		dnp->dn_maxblkid =
		    dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
		dn->dn_next_maxblkid[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}