// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/arc.h>
#include <sys/bptree.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
/*
 * A bptree is a queue of root block pointers from destroyed datasets. When a
 * dataset is destroyed its root block pointer is put on the end of the pool's
 * bptree queue so the dataset's blocks can be freed asynchronously by
 * dsl_scan_sync. This allows the delete operation to finish without traversing
 * all the dataset's blocks.
 *
 * Note that while bt_begin and bt_end are only ever incremented in this code,
 * they are effectively reset to 0 every time the entire bptree is freed because
 * the bptree's object is destroyed and re-created.
 */
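
/*
 * For reference, the on-disk state lives in the object's bonus buffer and in
 * a flat array of fixed-size entries. The layout below is a sketch of the
 * declarations in sys/bptree.h (field order assumed from that header):
 *
 *	typedef struct bptree_phys {
 *		uint64_t bt_begin;	// index of first live entry
 *		uint64_t bt_end;	// index past the last entry
 *		uint64_t bt_bytes;	// unique bytes still queued
 *		uint64_t bt_comp;	// compressed bytes still queued
 *		uint64_t bt_uncomp;	// uncompressed bytes still queued
 *	} bptree_phys_t;
 *
 *	typedef struct bptree_entry_phys {
 *		blkptr_t be_bp;
 *		uint64_t be_birth_txg;	// only free blocks born after this txg
 *		zbookmark_phys_t be_zb;	// traversal resume point, if needed
 *	} bptree_entry_phys_t;
 *
 * Entry i is stored at byte offset i * sizeof (bptree_entry_phys_t), so the
 * half-open interval [bt_begin, bt_end) describes the live part of the queue.
 */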

typedef struct bptree_args {
	bptree_phys_t *ba_phys;	/* data in bonus buffer, dirtied if freeing */
	boolean_t ba_free;	/* true if freeing during traversal */

	bptree_itor_t *ba_func;	/* function to call for each blockpointer */
	void *ba_arg;		/* caller supplied argument to ba_func */
	dmu_tx_t *ba_tx;	/* caller supplied tx, NULL if not freeing */
} bptree_args_t;

uint64_t
bptree_alloc(objset_t *os, dmu_tx_t *tx)
{
	uint64_t obj;
	dmu_buf_t *db;
	bptree_phys_t *bt;

	obj = dmu_object_alloc(os, DMU_OTN_UINT64_METADATA,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OTN_UINT64_METADATA,
	    sizeof (bptree_phys_t), tx);

	/*
	 * Bonus buffer contents are already initialized to 0, but for
	 * readability we make it explicit.
	 */
	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	bt = db->db_data;
	bt->bt_begin = 0;
	bt->bt_end = 0;
	bt->bt_bytes = 0;
	bt->bt_comp = 0;
	bt->bt_uncomp = 0;
	dmu_buf_rele(db, FTAG);

	return (obj);
}
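
/*
 * A minimal lifecycle sketch for a hypothetical caller (in practice the
 * pool's bptree object is managed by the DSL; "mos", "bp", "free_cb", and
 * the size arguments here are assumptions standing in for real state, and
 * everything runs in syncing context):
 *
 *	uint64_t obj = bptree_alloc(mos, tx);
 *	bptree_add(mos, obj, &bp, birth_txg, bytes, comp, uncomp, tx);
 *	...
 *	err = bptree_iterate(mos, obj, B_TRUE, free_cb, cb_arg, tx);
 *	if (err == 0 && bptree_is_empty(mos, obj))
 *		VERIFY0(bptree_free(mos, obj, tx));
 */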

int
bptree_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	bptree_phys_t *bt;

	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;
	ASSERT3U(bt->bt_begin, ==, bt->bt_end);
	ASSERT0(bt->bt_bytes);
	ASSERT0(bt->bt_comp);
	ASSERT0(bt->bt_uncomp);
	dmu_buf_rele(db, FTAG);

	return (dmu_object_free(os, obj, tx));
}

boolean_t
bptree_is_empty(objset_t *os, uint64_t obj)
{
	dmu_buf_t *db;
	bptree_phys_t *bt;
	boolean_t rv;

	VERIFY0(dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;
	rv = (bt->bt_begin == bt->bt_end);
	dmu_buf_rele(db, FTAG);
	return (rv);
}

void
bptree_add(objset_t *os, uint64_t obj, blkptr_t *bp, uint64_t birth_txg,
    uint64_t bytes, uint64_t comp, uint64_t uncomp, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	bptree_phys_t *bt;
	bptree_entry_phys_t *bte;

	/*
	 * bptree objects are in the pool mos, therefore they can only be
	 * modified in syncing context. Furthermore, this is only modified
	 * by the sync thread, so no locking is necessary.
	 */
	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
	bt = db->db_data;

	bte = kmem_zalloc(sizeof (*bte), KM_SLEEP);
	bte->be_birth_txg = birth_txg;
	bte->be_bp = *bp;
	dmu_write(os, obj, bt->bt_end * sizeof (*bte), sizeof (*bte), bte, tx,
	    DMU_READ_NO_PREFETCH);
	kmem_free(bte, sizeof (*bte));

	dmu_buf_will_dirty(db, tx);
	bt->bt_end++;
	bt->bt_bytes += bytes;
	bt->bt_comp += comp;
	bt->bt_uncomp += uncomp;
	dmu_buf_rele(db, FTAG);
}
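
/*
 * This is roughly how the dataset-destroy path queues work: a simplified
 * sketch of what the async-destroy branch of dsl_destroy_head_sync_impl()
 * does (the used/comp/uncomp space accounting is elided here):
 *
 *	bptree_add(dp->dp_meta_objset, dp->dp_bptree_obj,
 *	    &dsl_dataset_phys(ds)->ds_bp,
 *	    dsl_dataset_phys(ds)->ds_prev_snap_txg,
 *	    used, comp, uncomp, tx);
 *
 * Passing the previous snapshot's txg as be_birth_txg means the later
 * traversal only frees blocks born after it; older blocks are still
 * referenced by the snapshot and must not be freed.
 */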

static int
bptree_visit_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	int err;
	struct bptree_args *ba = arg;

	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
	    BP_IS_REDACTED(bp))
		return (0);

	err = ba->ba_func(ba->ba_arg, bp, ba->ba_tx);
	if (err == 0 && ba->ba_free) {
		ba->ba_phys->bt_bytes -= bp_get_dsize_sync(spa, bp);
		ba->ba_phys->bt_comp -= BP_GET_PSIZE(bp);
		ba->ba_phys->bt_uncomp -= BP_GET_UCSIZE(bp);
	}
	return (err);
}
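
/*
 * A bptree_itor_t callback receives each visited block pointer along with
 * the caller-supplied argument and tx. A minimal hypothetical callback that
 * frees each block might look like the sketch below (the real consumer is
 * dsl_scan's free callback, which additionally throttles work per txg and
 * updates scan state):
 *
 *	static int
 *	example_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
 *	{
 *		spa_t *spa = arg;
 *
 *		zio_free(spa, dmu_tx_get_txg(tx), bp);
 *		return (0);
 *	}
 */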

/*
 * If "free" is set:
 *  - It is assumed that "func" will be freeing the block pointers.
 *  - If "func" returns nonzero, the bookmark will be remembered and
 *    iteration will be restarted from this point on next invocation.
 *  - If an i/o error is encountered (e.g. "func" returns EIO or ECKSUM),
 *    bptree_iterate will remember the bookmark, continue traversing
 *    any additional entries, and return 0.
 *
 * If "free" is not set, traversal will stop and return an error if
 * an i/o error is encountered.
 *
 * In either case, if zfs_free_leak_on_eio is set, i/o errors will be
 * ignored and traversal will continue (i.e. TRAVERSE_HARD will be passed to
 * traverse_dataset_destroyed()).
 */
int
bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,
    void *arg, dmu_tx_t *tx)
{
	boolean_t ioerr = B_FALSE;
	int err;
	uint64_t i;
	dmu_buf_t *db;
	struct bptree_args ba;

	ASSERT(!free || dmu_tx_is_syncing(tx));

	err = dmu_bonus_hold(os, obj, FTAG, &db);
	if (err != 0)
		return (err);

	if (free)
		dmu_buf_will_dirty(db, tx);

	ba.ba_phys = db->db_data;
	ba.ba_free = free;
	ba.ba_func = func;
	ba.ba_arg = arg;
	ba.ba_tx = tx;

	err = 0;
	for (i = ba.ba_phys->bt_begin; i < ba.ba_phys->bt_end; i++) {
		bptree_entry_phys_t bte;
		int flags = TRAVERSE_PREFETCH_METADATA | TRAVERSE_POST |
		    TRAVERSE_NO_DECRYPT;

		err = dmu_read(os, obj, i * sizeof (bte), sizeof (bte),
		    &bte, DMU_READ_NO_PREFETCH);
		if (err != 0)
			break;

		if (zfs_free_leak_on_eio)
			flags |= TRAVERSE_HARD;
		zfs_dbgmsg("bptree index %lld: traversing from min_txg=%lld "
		    "bookmark %lld/%lld/%lld/%lld",
		    (longlong_t)i,
		    (longlong_t)bte.be_birth_txg,
		    (longlong_t)bte.be_zb.zb_objset,
		    (longlong_t)bte.be_zb.zb_object,
		    (longlong_t)bte.be_zb.zb_level,
		    (longlong_t)bte.be_zb.zb_blkid);
		err = traverse_dataset_destroyed(os->os_spa, &bte.be_bp,
		    bte.be_birth_txg, &bte.be_zb, flags,
		    bptree_visit_cb, &ba);
		if (free) {
			/*
			 * The callback has freed the visited block pointers.
			 * Record our traversal progress on disk, either by
			 * updating this record's bookmark, or by logically
			 * removing this record by advancing bt_begin.
			 */
			if (err != 0) {
				/* save bookmark for future resume */
				ASSERT3U(bte.be_zb.zb_objset, ==,
				    ZB_DESTROYED_OBJSET);
				ASSERT0(bte.be_zb.zb_level);
				dmu_write(os, obj, i * sizeof (bte),
				    sizeof (bte), &bte, tx,
				    DMU_READ_NO_PREFETCH);
				if (err == EIO || err == ECKSUM ||
				    err == ENXIO) {
					/*
					 * Skip the rest of this tree and
					 * continue on to the next entry.
					 */
					err = 0;
					ioerr = B_TRUE;
				} else {
					break;
				}
			} else if (ioerr) {
				/*
				 * This entry is finished, but there were
				 * i/o errors on previous entries, so we
				 * can't adjust bt_begin. Set this entry's
				 * be_birth_txg such that it will be
				 * treated as a no-op in future traversals.
				 */
				bte.be_birth_txg = UINT64_MAX;
				dmu_write(os, obj, i * sizeof (bte),
				    sizeof (bte), &bte, tx,
				    DMU_READ_NO_PREFETCH);
			}

			if (!ioerr) {
				ba.ba_phys->bt_begin++;
				(void) dmu_free_range(os, obj,
				    i * sizeof (bte), sizeof (bte), tx);
			}
		} else if (err != 0) {
			break;
		}
	}

	ASSERT(!free || err != 0 || ioerr ||
	    ba.ba_phys->bt_begin == ba.ba_phys->bt_end);

	/* if all blocks are free there should be no used space */
	if (ba.ba_phys->bt_begin == ba.ba_phys->bt_end) {
		if (zfs_free_leak_on_eio) {
			ba.ba_phys->bt_bytes = 0;
			ba.ba_phys->bt_comp = 0;
			ba.ba_phys->bt_uncomp = 0;
		}

		ASSERT0(ba.ba_phys->bt_bytes);
		ASSERT0(ba.ba_phys->bt_comp);
		ASSERT0(ba.ba_phys->bt_uncomp);
	}

	dmu_buf_rele(db, FTAG);

	return (err);
}
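
/*
 * The freeing consumer is dsl_scan_sync(); roughly, as a simplified sketch
 * with error handling and the surrounding scan bookkeeping elided:
 *
 *	err = bptree_iterate(dp->dp_meta_objset, dp->dp_bptree_obj,
 *	    B_TRUE, dsl_scan_free_block_cb, scn, tx);
 *
 * When "func" suspends the traversal (e.g. because enough blocks have been
 * freed for this txg), the bookmark saved above lets the next txg's call
 * resume where this one left off; once bt_begin catches up to bt_end the
 * queue is drained and the object can be destroyed with bptree_free().
 */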