xref: /freebsd/sys/contrib/openzfs/module/zfs/bptree.c (revision 4731124cace5e7a0224e29784617d2856e5c59ab)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/arc.h>
#include <sys/bptree.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>

/*
 * A bptree is a queue of root block pointers from destroyed datasets. When a
 * dataset is destroyed its root block pointer is put on the end of the pool's
 * bptree queue so the dataset's blocks can be freed asynchronously by
 * dsl_scan_sync. This allows the delete operation to finish without traversing
 * all the dataset's blocks.
 *
 * Note that while bt_begin and bt_end are only ever incremented in this code,
 * they are effectively reset to 0 every time the entire bptree is freed because
 * the bptree's object is destroyed and re-created.
 */
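
/*
 * On-disk layout, as implied by the code below: the bptree object's bonus
 * buffer holds a bptree_phys_t (bt_begin, bt_end, and the bt_bytes /
 * bt_comp / bt_uncomp space accounting), while the object's data is a flat
 * array of bptree_entry_phys_t records.  Live entries occupy indices
 * [bt_begin, bt_end); each entry stores a root block pointer, its birth
 * txg, and a bookmark used to resume an interrupted free.
 */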

typedef struct bptree_args {
	bptree_phys_t *ba_phys;	/* data in bonus buffer, dirtied if freeing */
	boolean_t ba_free;	/* true if freeing during traversal */

	bptree_itor_t *ba_func;	/* function to call for each block pointer */
	void *ba_arg;		/* caller supplied argument to ba_func */
	dmu_tx_t *ba_tx;	/* caller supplied tx, NULL if not freeing */
} bptree_args_t;

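/*
 * Create a new, empty bptree object in "os" and return its object number.
 * The object is a uint64 metadata object whose bonus buffer holds the
 * bptree_phys_t header.
 */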
uint64_t
bptree_alloc(objset_t *os, dmu_tx_t *tx)
{
	uint64_t obj;
	dmu_buf_t *db;
	bptree_phys_t *bt;

	obj = dmu_object_alloc(os, DMU_OTN_UINT64_METADATA,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OTN_UINT64_METADATA,
	    sizeof (bptree_phys_t), tx);

	/*
	 * Bonus buffer contents are already initialized to 0, but for
	 * readability we make it explicit.
	 */
	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	bt = db->db_data;
	bt->bt_begin = 0;
	bt->bt_end = 0;
	bt->bt_bytes = 0;
	bt->bt_comp = 0;
	bt->bt_uncomp = 0;
	dmu_buf_rele(db, FTAG);

	return (obj);
}

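/*
 * Destroy a bptree object.  The tree must already be empty: every entry
 * processed (bt_begin == bt_end) and the space accounting drained to zero.
 */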
int
bptree_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	bptree_phys_t *bt;

	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;
	ASSERT3U(bt->bt_begin, ==, bt->bt_end);
	ASSERT0(bt->bt_bytes);
	ASSERT0(bt->bt_comp);
	ASSERT0(bt->bt_uncomp);
	dmu_buf_rele(db, FTAG);

	return (dmu_object_free(os, obj, tx));
}

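/*
 * Return B_TRUE if the bptree has no pending entries, i.e. bt_begin has
 * caught up with bt_end.
 */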
boolean_t
bptree_is_empty(objset_t *os, uint64_t obj)
{
	dmu_buf_t *db;
	bptree_phys_t *bt;
	boolean_t rv;

	VERIFY0(dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;
	rv = (bt->bt_begin == bt->bt_end);
	dmu_buf_rele(db, FTAG);
	return (rv);
}

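/*
 * Append one entry to the bptree: record the root block pointer and its
 * birth txg at index bt_end, then bump bt_end and the space accounting.
 * Called in syncing context when a dataset is destroyed asynchronously.
 */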
void
bptree_add(objset_t *os, uint64_t obj, blkptr_t *bp, uint64_t birth_txg,
    uint64_t bytes, uint64_t comp, uint64_t uncomp, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	bptree_phys_t *bt;
	bptree_entry_phys_t *bte;

	/*
	 * bptree objects are in the pool mos, therefore they can only be
	 * modified in syncing context. Furthermore, this is only modified
	 * by the sync thread, so no locking is necessary.
	 */
	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;

	bte = kmem_zalloc(sizeof (*bte), KM_SLEEP);
	bte->be_birth_txg = birth_txg;
	bte->be_bp = *bp;
	dmu_write(os, obj, bt->bt_end * sizeof (*bte), sizeof (*bte), bte, tx);
	kmem_free(bte, sizeof (*bte));

	dmu_buf_will_dirty(db, tx);
	bt->bt_end++;
	bt->bt_bytes += bytes;
	bt->bt_comp += comp;
	bt->bt_uncomp += uncomp;
	dmu_buf_rele(db, FTAG);
}

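/*
 * traverse_dataset_destroyed() callback: skip holes, redacted blocks, and
 * dnode-level entries, pass every other block pointer to the caller's
 * iterator, and, when freeing, subtract the block's sizes from the
 * accounting kept in the bonus buffer.
 */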
static int
bptree_visit_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	int err;
	struct bptree_args *ba = arg;

	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
	    BP_IS_REDACTED(bp))
		return (0);

	err = ba->ba_func(ba->ba_arg, bp, ba->ba_tx);
	if (err == 0 && ba->ba_free) {
		ba->ba_phys->bt_bytes -= bp_get_dsize_sync(spa, bp);
		ba->ba_phys->bt_comp -= BP_GET_PSIZE(bp);
		ba->ba_phys->bt_uncomp -= BP_GET_UCSIZE(bp);
	}
	return (err);
}

/*
 * If "free" is set:
 *  - It is assumed that "func" will be freeing the block pointers.
 *  - If "func" returns nonzero, the bookmark will be remembered and
 *    iteration will be restarted from this point on next invocation.
 *  - If an i/o error is encountered (e.g. "func" returns EIO or ECKSUM),
 *    bptree_iterate will remember the bookmark, continue traversing
 *    any additional entries, and return 0.
 *
 * If "free" is not set, traversal will stop and return an error if
 * an i/o error is encountered.
 *
 * In either case, if zfs_free_leak_on_eio is set, i/o errors will be
 * ignored and traversal will continue (i.e. TRAVERSE_HARD will be passed to
 * traverse_dataset_destroyed()).
 */
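/*
 * Illustrative use (a sketch, not code from this file): the async-destroy
 * logic in dsl_scan_sync drains the pool's queue roughly like
 *
 *	(void) bptree_iterate(mos, bptree_obj, B_TRUE, free_block_cb,
 *	    cb_arg, tx);
 *
 * where "mos", "bptree_obj", "free_block_cb", and "cb_arg" are placeholder
 * names and free_block_cb is a bptree_itor_t that frees each block; the
 * bookmark handling described above lets the work resume in a later txg.
 */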
int
bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,
    void *arg, dmu_tx_t *tx)
{
	boolean_t ioerr = B_FALSE;
	int err;
	uint64_t i;
	dmu_buf_t *db;
	struct bptree_args ba;

	ASSERT(!free || dmu_tx_is_syncing(tx));

	err = dmu_bonus_hold(os, obj, FTAG, &db);
	if (err != 0)
		return (err);

	if (free)
		dmu_buf_will_dirty(db, tx);

	ba.ba_phys = db->db_data;
	ba.ba_free = free;
	ba.ba_func = func;
	ba.ba_arg = arg;
	ba.ba_tx = tx;

	err = 0;
	for (i = ba.ba_phys->bt_begin; i < ba.ba_phys->bt_end; i++) {
		bptree_entry_phys_t bte;
		int flags = TRAVERSE_PREFETCH_METADATA | TRAVERSE_POST |
		    TRAVERSE_NO_DECRYPT;

		err = dmu_read(os, obj, i * sizeof (bte), sizeof (bte),
		    &bte, DMU_READ_NO_PREFETCH);
		if (err != 0)
			break;

		if (zfs_free_leak_on_eio)
			flags |= TRAVERSE_HARD;
		zfs_dbgmsg("bptree index %lld: traversing from min_txg=%lld "
		    "bookmark %lld/%lld/%lld/%lld",
		    (longlong_t)i,
		    (longlong_t)bte.be_birth_txg,
		    (longlong_t)bte.be_zb.zb_objset,
		    (longlong_t)bte.be_zb.zb_object,
		    (longlong_t)bte.be_zb.zb_level,
		    (longlong_t)bte.be_zb.zb_blkid);
		err = traverse_dataset_destroyed(os->os_spa, &bte.be_bp,
		    bte.be_birth_txg, &bte.be_zb, flags,
		    bptree_visit_cb, &ba);
		if (free) {
			/*
			 * The callback has freed the visited block pointers.
			 * Record our traversal progress on disk, either by
			 * updating this record's bookmark, or by logically
			 * removing this record by advancing bt_begin.
			 */
			if (err != 0) {
				/* save bookmark for future resume */
				ASSERT3U(bte.be_zb.zb_objset, ==,
				    ZB_DESTROYED_OBJSET);
				ASSERT0(bte.be_zb.zb_level);
				dmu_write(os, obj, i * sizeof (bte),
				    sizeof (bte), &bte, tx);
				if (err == EIO || err == ECKSUM ||
				    err == ENXIO) {
					/*
					 * Skip the rest of this tree and
					 * continue on to the next entry.
					 */
					err = 0;
					ioerr = B_TRUE;
				} else {
					break;
				}
			} else if (ioerr) {
				/*
				 * This entry is finished, but there were
				 * i/o errors on previous entries, so we
				 * can't adjust bt_begin.  Set this entry's
				 * be_birth_txg such that it will be
				 * treated as a no-op in future traversals.
				 */
				bte.be_birth_txg = UINT64_MAX;
				dmu_write(os, obj, i * sizeof (bte),
				    sizeof (bte), &bte, tx);
			}

			if (!ioerr) {
				ba.ba_phys->bt_begin++;
				(void) dmu_free_range(os, obj,
				    i * sizeof (bte), sizeof (bte), tx);
			}
		} else if (err != 0) {
			break;
		}
	}

	ASSERT(!free || err != 0 || ioerr ||
	    ba.ba_phys->bt_begin == ba.ba_phys->bt_end);

	/* if all blocks are free there should be no used space */
	if (ba.ba_phys->bt_begin == ba.ba_phys->bt_end) {
		if (zfs_free_leak_on_eio) {
			ba.ba_phys->bt_bytes = 0;
			ba.ba_phys->bt_comp = 0;
			ba.ba_phys->bt_uncomp = 0;
		}

		ASSERT0(ba.ba_phys->bt_bytes);
		ASSERT0(ba.ba_phys->bt_comp);
		ASSERT0(ba.ba_phys->bt_uncomp);
	}

	dmu_buf_rele(db, FTAG);

	return (err);
}