xref: /freebsd/sys/contrib/openzfs/module/zfs/ddt_log.c (revision 3877025f52ee205fe99ad4ff68229933d57e4bcb)
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2023, Klara Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/ddt.h>
#include <sys/dmu_tx.h>
#include <sys/dmu.h>
#include <sys/ddt_impl.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>

/*
 * No more than this many txgs before swapping logs.
 */
uint_t zfs_dedup_log_txg_max = 8;

/*
 * Max memory for the log AVL trees. If zfs_dedup_log_mem_max is zero at module
 * load, it will be set to zfs_dedup_log_mem_max_percent% of total memory.
 */
uint64_t zfs_dedup_log_mem_max = 0;
uint_t zfs_dedup_log_mem_max_percent = 1;

static kmem_cache_t *ddt_log_entry_flat_cache;
static kmem_cache_t *ddt_log_entry_trad_cache;

#define	DDT_LOG_ENTRY_FLAT_SIZE	\
	(sizeof (ddt_log_entry_t) + DDT_FLAT_PHYS_SIZE)
#define	DDT_LOG_ENTRY_TRAD_SIZE	\
	(sizeof (ddt_log_entry_t) + DDT_TRAD_PHYS_SIZE)

#define	DDT_LOG_ENTRY_SIZE(ddt)	\
	_DDT_PHYS_SWITCH(ddt, DDT_LOG_ENTRY_FLAT_SIZE, DDT_LOG_ENTRY_TRAD_SIZE)

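/*
 * Set up the kmem caches for in-memory log entries and establish the
 * memory cap for the log AVL trees.
 */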
void
ddt_log_init(void)
{
	ddt_log_entry_flat_cache = kmem_cache_create("ddt_log_entry_flat_cache",
	    DDT_LOG_ENTRY_FLAT_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);
	ddt_log_entry_trad_cache = kmem_cache_create("ddt_log_entry_trad_cache",
	    DDT_LOG_ENTRY_TRAD_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Max memory for log AVL entries. At least 1M, because we need
	 * something (that's ~3800 entries per tree). They can say 100% if they
	 * want; it just means they're at the mercy of the txg flush limit.
	 */
	if (zfs_dedup_log_mem_max == 0) {
		zfs_dedup_log_mem_max_percent =
		    MIN(zfs_dedup_log_mem_max_percent, 100);
		zfs_dedup_log_mem_max = (physmem * PAGESIZE) *
		    zfs_dedup_log_mem_max_percent / 100;
	}
	zfs_dedup_log_mem_max = MAX(zfs_dedup_log_mem_max, 1*1024*1024);
}

void
ddt_log_fini(void)
{
	kmem_cache_destroy(ddt_log_entry_trad_cache);
	kmem_cache_destroy(ddt_log_entry_flat_cache);
}

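/*
 * Render the name under which log n's object number is stored in the
 * DDT directory ZAP, incorporating this table's checksum name.
 */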
static void
ddt_log_name(ddt_t *ddt, char *name, uint_t n)
{
	snprintf(name, DDT_NAMELEN, DMU_POOL_DDT_LOG,
	    zio_checksum_table[ddt->ddt_checksum].ci_name, n);
}

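/*
 * Write the in-core log state (version, flags, length, first txg and
 * checkpoint) out to the header in the log object's bonus buffer.
 */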
static void
ddt_log_update_header(ddt_t *ddt, ddt_log_t *ddl, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	VERIFY0(dmu_bonus_hold(ddt->ddt_os, ddl->ddl_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);

	ddt_log_header_t *hdr = (ddt_log_header_t *)db->db_data;
	DLH_SET_VERSION(hdr, 1);
	DLH_SET_FLAGS(hdr, ddl->ddl_flags);
	hdr->dlh_length = ddl->ddl_length;
	hdr->dlh_first_txg = ddl->ddl_first_txg;
	hdr->dlh_checkpoint = ddl->ddl_checkpoint;

	dmu_buf_rele(db, FTAG);
}

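/* Allocate one log object and record it in the DDT directory ZAP. */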
static void
ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{
	ASSERT3U(ddt->ddt_dir_object, >, 0);
	ASSERT0(ddl->ddl_object);

	char name[DDT_NAMELEN];
	ddt_log_name(ddt, name, n);

	ddl->ddl_object = dmu_object_alloc(ddt->ddt_os,
	    DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
	    DMU_OTN_UINT64_METADATA, sizeof (ddt_log_header_t), tx);
	VERIFY0(zap_add(ddt->ddt_os, ddt->ddt_dir_object, name,
	    sizeof (uint64_t), 1, &ddl->ddl_object, tx));
	ddl->ddl_length = 0;
	ddl->ddl_first_txg = tx->tx_txg;
	ddt_log_update_header(ddt, ddl, tx);
}

static void
ddt_log_create(ddt_t *ddt, dmu_tx_t *tx)
{
	ddt_log_create_one(ddt, ddt->ddt_log_active, 0, tx);
	ddt_log_create_one(ddt, ddt->ddt_log_flushing, 1, tx);
}

static void
ddt_log_destroy_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{
	ASSERT3U(ddt->ddt_dir_object, >, 0);

	if (ddl->ddl_object == 0)
		return;

	ASSERT0(ddl->ddl_length);

	char name[DDT_NAMELEN];
	ddt_log_name(ddt, name, n);

	VERIFY0(zap_remove(ddt->ddt_os, ddt->ddt_dir_object, name, tx));
	VERIFY0(dmu_object_free(ddt->ddt_os, ddl->ddl_object, tx));

	ddl->ddl_object = 0;
}

void
ddt_log_destroy(ddt_t *ddt, dmu_tx_t *tx)
{
	ddt_log_destroy_one(ddt, ddt->ddt_log_active, 0, tx);
	ddt_log_destroy_one(ddt, ddt->ddt_log_flushing, 1, tx);
}

static void
ddt_log_update_stats(ddt_t *ddt)
{
	/*
	 * Log object stats. We count the number of live entries in the log
	 * tree, even if there are more than on disk, and even if the same
	 * entry is on both the append and flush trees, because that's closer
	 * to what the user expects to see. This does mean the on-disk size is
	 * not really correlated with the number of entries, but I don't think
	 * that's a reasonable expectation anyway.
	 */
	dmu_object_info_t doi;
	uint64_t nblocks = 0;
	if (dmu_object_info(ddt->ddt_os, ddt->ddt_log_active->ddl_object,
	    &doi) == 0)
		nblocks += doi.doi_physical_blocks_512;
	if (dmu_object_info(ddt->ddt_os, ddt->ddt_log_flushing->ddl_object,
	    &doi) == 0)
		nblocks += doi.doi_physical_blocks_512;

	ddt_object_t *ddo = &ddt->ddt_log_stats;
	ddo->ddo_count =
	    avl_numnodes(&ddt->ddt_log_active->ddl_tree) +
	    avl_numnodes(&ddt->ddt_log_flushing->ddl_tree);
	ddo->ddo_mspace = ddo->ddo_count * DDT_LOG_ENTRY_SIZE(ddt);
	ddo->ddo_dspace = nblocks << 9;
}

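/*
 * Prepare to append nentries records to the active log: create the log
 * objects if necessary, compute the on-disk record length, and hold
 * enough buffers past the current end of the log to fit all the records.
 */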
void
ddt_log_begin(ddt_t *ddt, size_t nentries, dmu_tx_t *tx, ddt_log_update_t *dlu)
{
	ASSERT3U(nentries, >, 0);
	ASSERT0P(dlu->dlu_dbp);

	if (ddt->ddt_log_active->ddl_object == 0)
		ddt_log_create(ddt, tx);

	/*
	 * We want to store as many entries as we can in a block, but never
	 * split an entry across block boundaries.
	 */
	size_t reclen = P2ALIGN_TYPED(
	    sizeof (ddt_log_record_t) + sizeof (ddt_log_record_entry_t) +
	    DDT_PHYS_SIZE(ddt), sizeof (uint64_t), size_t);
	ASSERT3U(reclen, <=, UINT16_MAX);
	dlu->dlu_reclen = reclen;

	VERIFY0(dnode_hold(ddt->ddt_os, ddt->ddt_log_active->ddl_object, FTAG,
	    &dlu->dlu_dn));
	dnode_set_storage_type(dlu->dlu_dn, DMU_OT_DDT_ZAP);

	uint64_t nblocks = howmany(nentries,
	    dlu->dlu_dn->dn_datablksz / dlu->dlu_reclen);
	uint64_t offset = ddt->ddt_log_active->ddl_length;
	uint64_t length = nblocks * dlu->dlu_dn->dn_datablksz;

	VERIFY0(dmu_buf_hold_array_by_dnode(dlu->dlu_dn, offset, length,
	    B_FALSE, FTAG, &dlu->dlu_ndbp, &dlu->dlu_dbp,
	    DMU_READ_NO_PREFETCH));

	dlu->dlu_tx = tx;
	dlu->dlu_block = dlu->dlu_offset = 0;
}

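/* Allocate a zeroed in-memory log entry from the matching kmem cache. */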
static ddt_log_entry_t *
ddt_log_alloc_entry(ddt_t *ddt)
{
	ddt_log_entry_t *ddle;

	if (ddt->ddt_flags & DDT_FLAG_FLAT) {
		ddle = kmem_cache_alloc(ddt_log_entry_flat_cache, KM_SLEEP);
		memset(ddle, 0, DDT_LOG_ENTRY_FLAT_SIZE);
	} else {
		ddle = kmem_cache_alloc(ddt_log_entry_trad_cache, KM_SLEEP);
		memset(ddle, 0, DDT_LOG_ENTRY_TRAD_SIZE);
	}

	return (ddle);
}

static void
ddt_log_update_entry(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
{
	/* Create the log tree entry from a live or stored entry */
	avl_index_t where;
	ddt_log_entry_t *ddle =
	    avl_find(&ddl->ddl_tree, &ddlwe->ddlwe_key, &where);
	if (ddle == NULL) {
		ddle = ddt_log_alloc_entry(ddt);
		ddle->ddle_key = ddlwe->ddlwe_key;
		avl_insert(&ddl->ddl_tree, ddle, where);
	}
	ddle->ddle_type = ddlwe->ddlwe_type;
	ddle->ddle_class = ddlwe->ddlwe_class;
	memcpy(ddle->ddle_phys, &ddlwe->ddlwe_phys, DDT_PHYS_SIZE(ddt));
}

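/*
 * Append a single entry: update the in-memory log tree and histogram,
 * then serialise the record into the buffers held by ddt_log_begin().
 */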
void
ddt_log_entry(ddt_t *ddt, ddt_lightweight_entry_t *ddlwe, ddt_log_update_t *dlu)
{
	ASSERT3P(dlu->dlu_dbp, !=, NULL);

	ddt_log_update_entry(ddt, ddt->ddt_log_active, ddlwe);
	ddt_histogram_add_entry(ddt, &ddt->ddt_log_histogram, ddlwe);

	/* Get our block */
	ASSERT3U(dlu->dlu_block, <, dlu->dlu_ndbp);
	dmu_buf_t *db = dlu->dlu_dbp[dlu->dlu_block];

	/*
	 * If this would take us past the end of the block, finish it and
	 * move to the next one.
	 */
	if (db->db_size < (dlu->dlu_offset + dlu->dlu_reclen)) {
		ASSERT3U(dlu->dlu_offset, >, 0);
		dmu_buf_fill_done(db, dlu->dlu_tx, B_FALSE);
		dlu->dlu_block++;
		dlu->dlu_offset = 0;
		ASSERT3U(dlu->dlu_block, <, dlu->dlu_ndbp);
		db = dlu->dlu_dbp[dlu->dlu_block];
	}

	/*
	 * If this is the first time touching the block, inform the DMU that
	 * we will fill it, and zero it out.
	 */
	if (dlu->dlu_offset == 0) {
		dmu_buf_will_fill(db, dlu->dlu_tx, B_FALSE);
		memset(db->db_data, 0, db->db_size);
	}

	/* Create the log record directly in the buffer */
	ddt_log_record_t *dlr = (db->db_data + dlu->dlu_offset);
	DLR_SET_TYPE(dlr, DLR_ENTRY);
	DLR_SET_RECLEN(dlr, dlu->dlu_reclen);
	DLR_SET_ENTRY_TYPE(dlr, ddlwe->ddlwe_type);
	DLR_SET_ENTRY_CLASS(dlr, ddlwe->ddlwe_class);

	ddt_log_record_entry_t *dlre =
	    (ddt_log_record_entry_t *)&dlr->dlr_payload;
	dlre->dlre_key = ddlwe->ddlwe_key;
	memcpy(dlre->dlre_phys, &ddlwe->ddlwe_phys, DDT_PHYS_SIZE(ddt));

	/* Advance offset for next record. */
	dlu->dlu_offset += dlu->dlu_reclen;
}

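/*
 * Finish an update started with ddt_log_begin(): close out the last
 * block, release all holds, and persist the new log length.
 */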
void
ddt_log_commit(ddt_t *ddt, ddt_log_update_t *dlu)
{
	ASSERT3P(dlu->dlu_dbp, !=, NULL);
	ASSERT3U(dlu->dlu_block+1, ==, dlu->dlu_ndbp);
	ASSERT3U(dlu->dlu_offset, >, 0);

	/*
	 * Close out the last block. Whatever we haven't used will be zeroed,
	 * which matches DLR_INVALID, so we can detect this during load.
	 */
	dmu_buf_fill_done(dlu->dlu_dbp[dlu->dlu_block], dlu->dlu_tx, B_FALSE);

	dmu_buf_rele_array(dlu->dlu_dbp, dlu->dlu_ndbp, FTAG);

	ddt->ddt_log_active->ddl_length +=
	    dlu->dlu_ndbp * (uint64_t)dlu->dlu_dn->dn_datablksz;
	dnode_rele(dlu->dlu_dn, FTAG);

	ddt_log_update_header(ddt, ddt->ddt_log_active, dlu->dlu_tx);

	memset(dlu, 0, sizeof (ddt_log_update_t));

	ddt_log_update_stats(ddt);
}

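/*
 * Remove the first (lowest-keyed) entry from the log tree and return it
 * as a lightweight entry. Returns B_FALSE if the tree is empty.
 */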
boolean_t
ddt_log_take_first(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
{
	ddt_log_entry_t *ddle = avl_first(&ddl->ddl_tree);
	if (ddle == NULL)
		return (B_FALSE);

	DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, ddlwe);

	ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, ddlwe);

	avl_remove(&ddl->ddl_tree, ddle);
	kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
	    ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);

	return (B_TRUE);
}

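/* Remove the entry with the given key from the log tree, if present. */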
boolean_t
ddt_log_remove_key(ddt_t *ddt, ddt_log_t *ddl, const ddt_key_t *ddk)
{
	ddt_log_entry_t *ddle = avl_find(&ddl->ddl_tree, ddk, NULL);
	if (ddle == NULL)
		return (B_FALSE);

	ddt_lightweight_entry_t ddlwe;
	DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, &ddlwe);
	ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, &ddlwe);

	avl_remove(&ddl->ddl_tree, ddle);
	kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
	    ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);

	return (B_TRUE);
}

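/*
 * Look up a key in the active, then the flushing, log tree. If found
 * and ddlwe is non-NULL, fill it in. The entry is not removed.
 */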
boolean_t
ddt_log_find_key(ddt_t *ddt, const ddt_key_t *ddk,
    ddt_lightweight_entry_t *ddlwe)
{
	ddt_log_entry_t *ddle =
	    avl_find(&ddt->ddt_log_active->ddl_tree, ddk, NULL);
	if (!ddle)
		ddle = avl_find(&ddt->ddt_log_flushing->ddl_tree, ddk, NULL);
	if (!ddle)
		return (B_FALSE);
	if (ddlwe)
		DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, ddlwe);
	return (B_TRUE);
}

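/*
 * Record a flush checkpoint on the flushing log: every entry with a key
 * at or before ddlwe's has been flushed, so a reload can skip them.
 */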
void
ddt_log_checkpoint(ddt_t *ddt, ddt_lightweight_entry_t *ddlwe, dmu_tx_t *tx)
{
	ddt_log_t *ddl = ddt->ddt_log_flushing;

	ASSERT3U(ddl->ddl_object, !=, 0);

#ifdef ZFS_DEBUG
	/*
	 * There should not be any entries on the log tree before the given
	 * checkpoint. Assert that this is the case.
	 */
	ddt_log_entry_t *ddle = avl_first(&ddl->ddl_tree);
	if (ddle != NULL)
		VERIFY3U(ddt_key_compare(&ddle->ddle_key, &ddlwe->ddlwe_key),
		    >, 0);
#endif

	ddl->ddl_flags |= DDL_FLAG_CHECKPOINT;
	ddl->ddl_checkpoint = ddlwe->ddlwe_key;
	ddt_log_update_header(ddt, ddl, tx);

	ddt_log_update_stats(ddt);
}

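/*
 * Reset the (fully flushed) flushing log: free its blocks, zero its
 * length, and clear the checkpoint so the object can be reused.
 */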
void
ddt_log_truncate(ddt_t *ddt, dmu_tx_t *tx)
{
	ddt_log_t *ddl = ddt->ddt_log_flushing;

	if (ddl->ddl_object == 0)
		return;

	ASSERT(avl_is_empty(&ddl->ddl_tree));

	/* Eject the entire object */
	dmu_free_range(ddt->ddt_os, ddl->ddl_object, 0, DMU_OBJECT_END, tx);

	ddl->ddl_length = 0;
	ddl->ddl_flags &= ~DDL_FLAG_CHECKPOINT;
	memset(&ddl->ddl_checkpoint, 0, sizeof (ddt_key_t));
	ddt_log_update_header(ddt, ddl, tx);

	ddt_log_update_stats(ddt);
}

boolean_t
ddt_log_swap(ddt_t *ddt, dmu_tx_t *tx)
{
	/* Swap the logs. The old flushing one must be empty */
	VERIFY(avl_is_empty(&ddt->ddt_log_flushing->ddl_tree));

	/*
	 * If there are still blocks on the flushing log, truncate it first.
	 * This can happen if there were entries on the flushing log that were
	 * removed in memory via ddt_lookup(); their vestigial remains are
	 * on disk.
	 */
	if (ddt->ddt_log_flushing->ddl_length > 0)
		ddt_log_truncate(ddt, tx);

	/*
	 * Swap policy. We swap the logs (and so begin flushing) when the
	 * active tree grows too large, or when we haven't swapped it in
	 * some amount of time, or if something has requested the logs be
	 * flushed ASAP (see ddt_walk_init()).
	 */

	/*
	 * The log tree is too large if the memory usage of its entries is over
	 * half of the memory limit. This effectively gives each log tree half
	 * the available memory.
	 */
	const boolean_t too_large =
	    (avl_numnodes(&ddt->ddt_log_active->ddl_tree) *
	    DDT_LOG_ENTRY_SIZE(ddt)) >= (zfs_dedup_log_mem_max >> 1);

	const boolean_t too_old =
	    tx->tx_txg >=
	    (ddt->ddt_log_active->ddl_first_txg +
	    MAX(1, zfs_dedup_log_txg_max));

	const boolean_t force =
	    ddt->ddt_log_active->ddl_first_txg <= ddt->ddt_flush_force_txg;

	if (!(too_large || too_old || force))
		return (B_FALSE);

	ddt_log_t *swap = ddt->ddt_log_active;
	ddt->ddt_log_active = ddt->ddt_log_flushing;
	ddt->ddt_log_flushing = swap;

	ASSERT(ddt->ddt_log_active->ddl_flags & DDL_FLAG_FLUSHING);
	ddt->ddt_log_active->ddl_flags &=
	    ~(DDL_FLAG_FLUSHING | DDL_FLAG_CHECKPOINT);

	ASSERT(!(ddt->ddt_log_flushing->ddl_flags & DDL_FLAG_FLUSHING));
	ddt->ddt_log_flushing->ddl_flags |= DDL_FLAG_FLUSHING;

	ddt->ddt_log_active->ddl_first_txg = tx->tx_txg;

	ddt_log_update_header(ddt, ddt->ddt_log_active, tx);
	ddt_log_update_header(ddt, ddt->ddt_log_flushing, tx);

	ddt_log_update_stats(ddt);

	return (B_TRUE);
}

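/* Reconstitute one on-disk record into the in-memory log tree. */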
static inline void
ddt_log_load_entry(ddt_t *ddt, ddt_log_t *ddl, ddt_log_record_t *dlr,
    const ddt_key_t *checkpoint)
{
	ASSERT3U(DLR_GET_TYPE(dlr), ==, DLR_ENTRY);

	ddt_log_record_entry_t *dlre =
	    (ddt_log_record_entry_t *)dlr->dlr_payload;
	if (checkpoint != NULL &&
	    ddt_key_compare(&dlre->dlre_key, checkpoint) <= 0) {
		/* Skip pre-checkpoint entries; they're already flushed. */
		return;
	}

	ddt_lightweight_entry_t ddlwe;
	ddlwe.ddlwe_type = DLR_GET_ENTRY_TYPE(dlr);
	ddlwe.ddlwe_class = DLR_GET_ENTRY_CLASS(dlr);

	ddlwe.ddlwe_key = dlre->dlre_key;
	memcpy(&ddlwe.ddlwe_phys, dlre->dlre_phys, DDT_PHYS_SIZE(ddt));

	ddt_log_update_entry(ddt, ddl, &ddlwe);
}

static void
ddt_log_empty(ddt_t *ddt, ddt_log_t *ddl)
{
	void *cookie = NULL;
	ddt_log_entry_t *ddle;
	IMPLY(ddt->ddt_version == UINT64_MAX, avl_is_empty(&ddl->ddl_tree));
	while ((ddle =
	    avl_destroy_nodes(&ddl->ddl_tree, &cookie)) != NULL) {
		kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
		    ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
	}
	ASSERT(avl_is_empty(&ddl->ddl_tree));
}

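/*
 * Load a single log from disk: read and validate the header, then walk
 * the records block by block, honouring the checkpoint if one is set.
 * On any failure the partially-loaded tree is emptied.
 */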
static int
ddt_log_load_one(ddt_t *ddt, uint_t n)
{
	ASSERT3U(n, <, 2);

	ddt_log_t *ddl = &ddt->ddt_log[n];

	char name[DDT_NAMELEN];
	ddt_log_name(ddt, name, n);

	uint64_t obj;
	int err = zap_lookup(ddt->ddt_os, ddt->ddt_dir_object, name,
	    sizeof (uint64_t), 1, &obj);
	if (err == ENOENT)
		return (0);
	if (err != 0)
		return (err);

	dnode_t *dn;
	err = dnode_hold(ddt->ddt_os, obj, FTAG, &dn);
	if (err != 0)
		return (err);

	ddt_log_header_t hdr;
	dmu_buf_t *db;
	err = dmu_bonus_hold_by_dnode(dn, FTAG, &db, DMU_READ_NO_PREFETCH);
	if (err != 0) {
		dnode_rele(dn, FTAG);
		return (err);
	}
	memcpy(&hdr, db->db_data, sizeof (ddt_log_header_t));
	dmu_buf_rele(db, FTAG);

	if (DLH_GET_VERSION(&hdr) != 1) {
		dnode_rele(dn, FTAG);
		zfs_dbgmsg("ddt_log_load: spa=%s ddt_log=%s "
		    "unknown version=%llu", spa_name(ddt->ddt_spa), name,
		    (u_longlong_t)DLH_GET_VERSION(&hdr));
		return (SET_ERROR(EINVAL));
	}

	ddt_key_t *checkpoint = NULL;
	if (DLH_GET_FLAGS(&hdr) & DDL_FLAG_CHECKPOINT) {
		/*
		 * If the log has a checkpoint, then we can ignore any entries
		 * that have already been flushed.
		 */
		ASSERT(DLH_GET_FLAGS(&hdr) & DDL_FLAG_FLUSHING);
		checkpoint = &hdr.dlh_checkpoint;
	}

	if (hdr.dlh_length > 0) {
		dmu_prefetch_by_dnode(dn, 0, 0, hdr.dlh_length,
		    ZIO_PRIORITY_SYNC_READ);

		for (uint64_t offset = 0; offset < hdr.dlh_length;
		    offset += dn->dn_datablksz) {
			err = dmu_buf_hold_by_dnode(dn, offset, FTAG, &db,
			    DMU_READ_PREFETCH);
			if (err != 0) {
				dnode_rele(dn, FTAG);
				ddt_log_empty(ddt, ddl);
				return (err);
			}

			uint64_t boffset = 0;
			while (boffset < db->db_size) {
				ddt_log_record_t *dlr =
				    (ddt_log_record_t *)(db->db_data + boffset);

				/* Partially-filled block, skip the rest */
				if (DLR_GET_TYPE(dlr) == DLR_INVALID)
					break;

				switch (DLR_GET_TYPE(dlr)) {
				case DLR_ENTRY:
					ddt_log_load_entry(ddt, ddl, dlr,
					    checkpoint);
					break;

				default:
					dmu_buf_rele(db, FTAG);
					dnode_rele(dn, FTAG);
					ddt_log_empty(ddt, ddl);
					return (SET_ERROR(EINVAL));
				}

				boffset += DLR_GET_RECLEN(dlr);
			}

			dmu_buf_rele(db, FTAG);
		}
	}

	dnode_rele(dn, FTAG);

	ddl->ddl_object = obj;
	ddl->ddl_flags = DLH_GET_FLAGS(&hdr);
	ddl->ddl_length = hdr.dlh_length;
	ddl->ddl_first_txg = hdr.dlh_first_txg;

	if (ddl->ddl_flags & DDL_FLAG_FLUSHING)
		ddt->ddt_log_flushing = ddl;
	else
		ddt->ddt_log_active = ddl;

	return (0);
}

int
ddt_log_load(ddt_t *ddt)
{
	int err;

	if (spa_load_state(ddt->ddt_spa) == SPA_LOAD_TRYIMPORT) {
		/*
		 * The DDT is going to be freed again in a moment, so there's
		 * no point loading the log; it'll just slow down import.
		 */
		return (0);
	}

	ASSERT0(ddt->ddt_log[0].ddl_object);
	ASSERT0(ddt->ddt_log[1].ddl_object);
	if (ddt->ddt_dir_object == 0) {
		/*
		 * If we're configured but the containing dir doesn't exist
		 * yet, then the log object can't possibly exist either.
		 */
		ASSERT3U(ddt->ddt_version, !=, UINT64_MAX);
		return (SET_ERROR(ENOENT));
	}

	if ((err = ddt_log_load_one(ddt, 0)) != 0)
		return (err);
	if ((err = ddt_log_load_one(ddt, 1)) != 0)
		return (err);

	VERIFY3P(ddt->ddt_log_active, !=, ddt->ddt_log_flushing);
	VERIFY(!(ddt->ddt_log_active->ddl_flags & DDL_FLAG_FLUSHING));
	VERIFY(!(ddt->ddt_log_active->ddl_flags & DDL_FLAG_CHECKPOINT));
	VERIFY(ddt->ddt_log_flushing->ddl_flags & DDL_FLAG_FLUSHING);

	/*
	 * We have two finalisation tasks:
	 *
	 * - rebuild the histogram. We do this at the end rather than while
	 *   we're loading so we don't need to uncount and recount entries that
	 *   appear multiple times in the log.
	 *
	 * - remove entries from the flushing tree that are on both trees. This
	 *   happens when ddt_lookup() rehydrates an entry from the flushing
	 *   tree, as ddt_log_remove_key() removes the entry from the in-memory
	 *   tree but doesn't remove it from disk.
	 */

	/*
	 * We don't technically need a config lock here, since there shouldn't
	 * be pool config changes during DDT load. dva_get_dsize_sync() via
	 * ddt_stat_generate() is expecting it though, and it won't hurt
	 * anything, so we take it.
	 */
	spa_config_enter(ddt->ddt_spa, SCL_STATE, FTAG, RW_READER);

	avl_tree_t *al = &ddt->ddt_log_active->ddl_tree;
	avl_tree_t *fl = &ddt->ddt_log_flushing->ddl_tree;
	ddt_log_entry_t *ae = avl_first(al);
	ddt_log_entry_t *fe = avl_first(fl);
	while (ae != NULL || fe != NULL) {
		ddt_log_entry_t *ddle;
		if (ae == NULL) {
			/* active exhausted, take flushing */
			ddle = fe;
			fe = AVL_NEXT(fl, fe);
		} else if (fe == NULL) {
			/* flushing exhausted, take active */
			ddle = ae;
			ae = AVL_NEXT(al, ae);
		} else {
			/* compare active and flushing */
			int c = ddt_key_compare(&ae->ddle_key, &fe->ddle_key);
			if (c < 0) {
				/* active behind, take and advance */
				ddle = ae;
				ae = AVL_NEXT(al, ae);
			} else if (c > 0) {
				/* flushing behind, take and advance */
				ddle = fe;
				fe = AVL_NEXT(fl, fe);
			} else {
				/* match. remove from flushing, take active */
				ddle = fe;
				fe = AVL_NEXT(fl, fe);
				avl_remove(fl, ddle);
				kmem_cache_free(ddt->ddt_flags &
				    DDT_FLAG_FLAT ? ddt_log_entry_flat_cache :
				    ddt_log_entry_trad_cache, ddle);

				ddle = ae;
				ae = AVL_NEXT(al, ae);
			}
		}

		ddt_lightweight_entry_t ddlwe;
		DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, &ddlwe);
		ddt_histogram_add_entry(ddt, &ddt->ddt_log_histogram, &ddlwe);
	}

	spa_config_exit(ddt->ddt_spa, SCL_STATE, FTAG);

	ddt_log_update_stats(ddt);

	return (0);
}

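/*
 * Set up the in-memory log trees, designating log 0 active and log 1
 * flushing; ddt_log_load() may swap these to match the on-disk state.
 */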
void
ddt_log_alloc(ddt_t *ddt)
{
	ASSERT0P(ddt->ddt_log_active);
	ASSERT0P(ddt->ddt_log_flushing);

	avl_create(&ddt->ddt_log[0].ddl_tree, ddt_key_compare,
	    sizeof (ddt_log_entry_t), offsetof(ddt_log_entry_t, ddle_node));
	avl_create(&ddt->ddt_log[1].ddl_tree, ddt_key_compare,
	    sizeof (ddt_log_entry_t), offsetof(ddt_log_entry_t, ddle_node));
	ddt->ddt_log_active = &ddt->ddt_log[0];
	ddt->ddt_log_flushing = &ddt->ddt_log[1];
	ddt->ddt_log_flushing->ddl_flags |= DDL_FLAG_FLUSHING;
}

void
ddt_log_free(ddt_t *ddt)
{
	ddt_log_empty(ddt, &ddt->ddt_log[0]);
	ddt_log_empty(ddt, &ddt->ddt_log[1]);
	avl_destroy(&ddt->ddt_log[0].ddl_tree);
	avl_destroy(&ddt->ddt_log[1].ddl_tree);
}

ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, log_txg_max, UINT, ZMOD_RW,
	"Max transactions before starting to flush dedup logs");

ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, log_mem_max, U64, ZMOD_RD,
	"Max memory for dedup logs");

ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, log_mem_max_percent, UINT, ZMOD_RD,
	"Max memory for dedup logs, as % of total memory");