// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2023, Klara Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/ddt.h>
#include <sys/dmu_tx.h>
#include <sys/dmu.h>
#include <sys/ddt_impl.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>

/*
 * No more than this many txgs before swapping logs.
 */
uint_t zfs_dedup_log_txg_max = 8;

/*
 * Max memory for the log AVL trees. If zfs_dedup_log_mem_max is zero at module
 * load, it will be set to zfs_dedup_log_mem_max_percent% of total memory.
 */
uint64_t zfs_dedup_log_mem_max = 0;
uint_t zfs_dedup_log_mem_max_percent = 1;


static kmem_cache_t *ddt_log_entry_flat_cache;
static kmem_cache_t *ddt_log_entry_trad_cache;

#define	DDT_LOG_ENTRY_FLAT_SIZE	\
	(sizeof (ddt_log_entry_t) + DDT_FLAT_PHYS_SIZE)
#define	DDT_LOG_ENTRY_TRAD_SIZE	\
	(sizeof (ddt_log_entry_t) + DDT_TRAD_PHYS_SIZE)

#define	DDT_LOG_ENTRY_SIZE(ddt)	\
	_DDT_PHYS_SWITCH(ddt, DDT_LOG_ENTRY_FLAT_SIZE, DDT_LOG_ENTRY_TRAD_SIZE)

void
ddt_log_init(void)
{
	ddt_log_entry_flat_cache = kmem_cache_create("ddt_log_entry_flat_cache",
	    DDT_LOG_ENTRY_FLAT_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);
	ddt_log_entry_trad_cache = kmem_cache_create("ddt_log_entry_trad_cache",
	    DDT_LOG_ENTRY_TRAD_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Max memory for log AVL entries. At least 1M, because we need
	 * something (that's ~3800 entries per tree). They can say 100% if they
	 * want; it just means they're at the mercy of the txg flush limit.
	 */
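	/*
	 * (Back-of-envelope, sizes approximate: the 1M floor split across
	 * the two trees gives ~512K each, and at roughly 140 bytes per
	 * flat log entry that is where the ~3800 figure comes from.)
	 */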
	if (zfs_dedup_log_mem_max == 0) {
		zfs_dedup_log_mem_max_percent =
		    MIN(zfs_dedup_log_mem_max_percent, 100);
		zfs_dedup_log_mem_max = (physmem * PAGESIZE) *
		    zfs_dedup_log_mem_max_percent / 100;
	}
	zfs_dedup_log_mem_max = MAX(zfs_dedup_log_mem_max, 1*1024*1024);
}

void
ddt_log_fini(void)
{
	kmem_cache_destroy(ddt_log_entry_trad_cache);
	kmem_cache_destroy(ddt_log_entry_flat_cache);
}

static void
ddt_log_name(ddt_t *ddt, char *name, uint_t n)
{
	snprintf(name, DDT_NAMELEN, DMU_POOL_DDT_LOG,
	    zio_checksum_table[ddt->ddt_checksum].ci_name, n);
}

static void
ddt_log_update_header(ddt_t *ddt, ddt_log_t *ddl, dmu_tx_t *tx)
{
	dmu_buf_t *db;
	VERIFY0(dmu_bonus_hold(ddt->ddt_os, ddl->ddl_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);

	ddt_log_header_t *hdr = (ddt_log_header_t *)db->db_data;
	DLH_SET_VERSION(hdr, 1);
	DLH_SET_FLAGS(hdr, ddl->ddl_flags);
	hdr->dlh_length = ddl->ddl_length;
	hdr->dlh_first_txg = ddl->ddl_first_txg;
	hdr->dlh_checkpoint = ddl->ddl_checkpoint;

	dmu_buf_rele(db, FTAG);
}

static void
ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{
	ASSERT3U(ddt->ddt_dir_object, >, 0);
	ASSERT3U(ddl->ddl_object, ==, 0);

	char name[DDT_NAMELEN];
	ddt_log_name(ddt, name, n);

	ddl->ddl_object = dmu_object_alloc(ddt->ddt_os,
	    DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
	    DMU_OTN_UINT64_METADATA, sizeof (ddt_log_header_t), tx);
	VERIFY0(zap_add(ddt->ddt_os, ddt->ddt_dir_object, name,
	    sizeof (uint64_t), 1, &ddl->ddl_object, tx));
	ddl->ddl_length = 0;
	ddl->ddl_first_txg = tx->tx_txg;
	ddt_log_update_header(ddt, ddl, tx);
}

static void
ddt_log_create(ddt_t *ddt, dmu_tx_t *tx)
{
	ddt_log_create_one(ddt, ddt->ddt_log_active, 0, tx);
	ddt_log_create_one(ddt, ddt->ddt_log_flushing, 1, tx);
}

static void
ddt_log_destroy_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{
	ASSERT3U(ddt->ddt_dir_object, >, 0);

	if (ddl->ddl_object == 0)
		return;

	ASSERT0(ddl->ddl_length);

	char name[DDT_NAMELEN];
	ddt_log_name(ddt, name, n);

	VERIFY0(zap_remove(ddt->ddt_os, ddt->ddt_dir_object, name, tx));
	VERIFY0(dmu_object_free(ddt->ddt_os, ddl->ddl_object, tx));

	ddl->ddl_object = 0;
}

void
ddt_log_destroy(ddt_t *ddt, dmu_tx_t *tx)
{
	ddt_log_destroy_one(ddt, ddt->ddt_log_active, 0, tx);
	ddt_log_destroy_one(ddt, ddt->ddt_log_flushing, 1, tx);
}

static void
ddt_log_update_stats(ddt_t *ddt)
{
	/*
	 * Log object stats. We count the number of live entries in the log
	 * trees, even if that's more than are stored on disk, and even if
	 * the same entry is on both the active and flushing trees, because
	 * that's closer to what the user expects to see. This does mean the
	 * on-disk size is not really correlated with the number of entries,
	 * but I don't think it's reasonable to expect that anyway.
	 */
	dmu_object_info_t doi;
	uint64_t nblocks;
	dmu_object_info(ddt->ddt_os, ddt->ddt_log_active->ddl_object, &doi);
	nblocks = doi.doi_physical_blocks_512;
	dmu_object_info(ddt->ddt_os, ddt->ddt_log_flushing->ddl_object, &doi);
	nblocks += doi.doi_physical_blocks_512;

	ddt_object_t *ddo = &ddt->ddt_log_stats;
	ddo->ddo_count =
	    avl_numnodes(&ddt->ddt_log_active->ddl_tree) +
	    avl_numnodes(&ddt->ddt_log_flushing->ddl_tree);
	ddo->ddo_mspace = ddo->ddo_count * DDT_LOG_ENTRY_SIZE(ddt);
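	/* doi_physical_blocks_512 counts 512-byte blocks; shift up to bytes */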
	ddo->ddo_dspace = nblocks << 9;
}

void
ddt_log_begin(ddt_t *ddt, size_t nentries, dmu_tx_t *tx, ddt_log_update_t *dlu)
{
	ASSERT3U(nentries, >, 0);
	ASSERT3P(dlu->dlu_dbp, ==, NULL);

	if (ddt->ddt_log_active->ddl_object == 0)
		ddt_log_create(ddt, tx);

	/*
	 * We want to store as many entries as we can in a block, but never
	 * split an entry across block boundaries.
	 */
	size_t reclen = P2ALIGN_TYPED(
	    sizeof (ddt_log_record_t) + sizeof (ddt_log_record_entry_t) +
	    DDT_PHYS_SIZE(ddt), sizeof (uint64_t), size_t);
	ASSERT3U(reclen, <=, UINT16_MAX);
	dlu->dlu_reclen = reclen;

	VERIFY0(dnode_hold(ddt->ddt_os, ddt->ddt_log_active->ddl_object, FTAG,
	    &dlu->dlu_dn));
	dnode_set_storage_type(dlu->dlu_dn, DMU_OT_DDT_ZAP);

	uint64_t nblocks = howmany(nentries,
	    dlu->dlu_dn->dn_datablksz / dlu->dlu_reclen);
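	/*
	 * Worked example (numbers illustrative only): with a 128K data
	 * block and a hypothetical 320-byte record, each block holds
	 * 131072 / 320 = 409 whole records, so 1000 entries need
	 * howmany(1000, 409) = 3 blocks.
	 */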
	uint64_t offset = ddt->ddt_log_active->ddl_length;
	uint64_t length = nblocks * dlu->dlu_dn->dn_datablksz;

	VERIFY0(dmu_buf_hold_array_by_dnode(dlu->dlu_dn, offset, length,
	    B_FALSE, FTAG, &dlu->dlu_ndbp, &dlu->dlu_dbp,
	    DMU_READ_NO_PREFETCH));

	dlu->dlu_tx = tx;
	dlu->dlu_block = dlu->dlu_offset = 0;
}

static ddt_log_entry_t *
ddt_log_alloc_entry(ddt_t *ddt)
{
	ddt_log_entry_t *ddle;

	if (ddt->ddt_flags & DDT_FLAG_FLAT) {
		ddle = kmem_cache_alloc(ddt_log_entry_flat_cache, KM_SLEEP);
		memset(ddle, 0, DDT_LOG_ENTRY_FLAT_SIZE);
	} else {
		ddle = kmem_cache_alloc(ddt_log_entry_trad_cache, KM_SLEEP);
		memset(ddle, 0, DDT_LOG_ENTRY_TRAD_SIZE);
	}

	return (ddle);
}

static void
ddt_log_update_entry(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
{
	/* Create the log tree entry from a live or stored entry */
	avl_index_t where;
	ddt_log_entry_t *ddle =
	    avl_find(&ddl->ddl_tree, &ddlwe->ddlwe_key, &where);
	if (ddle == NULL) {
		ddle = ddt_log_alloc_entry(ddt);
		ddle->ddle_key = ddlwe->ddlwe_key;
		avl_insert(&ddl->ddl_tree, ddle, where);
	}
	ddle->ddle_type = ddlwe->ddlwe_type;
	ddle->ddle_class = ddlwe->ddlwe_class;
	memcpy(ddle->ddle_phys, &ddlwe->ddlwe_phys, DDT_PHYS_SIZE(ddt));
}

void
ddt_log_entry(ddt_t *ddt, ddt_lightweight_entry_t *ddlwe, ddt_log_update_t *dlu)
{
	ASSERT3U(dlu->dlu_dbp, !=, NULL);

	ddt_log_update_entry(ddt, ddt->ddt_log_active, ddlwe);
	ddt_histogram_add_entry(ddt, &ddt->ddt_log_histogram, ddlwe);

	/* Get our block */
	ASSERT3U(dlu->dlu_block, <, dlu->dlu_ndbp);
	dmu_buf_t *db = dlu->dlu_dbp[dlu->dlu_block];

	/*
	 * If this would take us past the end of the block, finish it and
	 * move to the next one.
	 */
	if (db->db_size < (dlu->dlu_offset + dlu->dlu_reclen)) {
		ASSERT3U(dlu->dlu_offset, >, 0);
		dmu_buf_fill_done(db, dlu->dlu_tx, B_FALSE);
		dlu->dlu_block++;
		dlu->dlu_offset = 0;
		ASSERT3U(dlu->dlu_block, <, dlu->dlu_ndbp);
		db = dlu->dlu_dbp[dlu->dlu_block];
	}

	/*
	 * If this is the first time touching the block, inform the DMU that
	 * we will fill it, and zero it out.
	 */
	if (dlu->dlu_offset == 0) {
		dmu_buf_will_fill(db, dlu->dlu_tx, B_FALSE);
		memset(db->db_data, 0, db->db_size);
	}

	/* Create the log record directly in the buffer */
	ddt_log_record_t *dlr = (db->db_data + dlu->dlu_offset);
	DLR_SET_TYPE(dlr, DLR_ENTRY);
	DLR_SET_RECLEN(dlr, dlu->dlu_reclen);
	DLR_SET_ENTRY_TYPE(dlr, ddlwe->ddlwe_type);
	DLR_SET_ENTRY_CLASS(dlr, ddlwe->ddlwe_class);

	ddt_log_record_entry_t *dlre =
	    (ddt_log_record_entry_t *)&dlr->dlr_payload;
	dlre->dlre_key = ddlwe->ddlwe_key;
	memcpy(dlre->dlre_phys, &ddlwe->ddlwe_phys, DDT_PHYS_SIZE(ddt));

	/* Advance offset for next record. */
	dlu->dlu_offset += dlu->dlu_reclen;
}
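
/*
 * On-disk block layout produced by the above (a sketch): fixed-size
 * records are packed back-to-back from the start of each block, each
 * led by a ddt_log_record_t header carrying its type and reclen. The
 * unused tail of a block is left zeroed, which reads back as
 * DLR_INVALID during load.
 */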

void
ddt_log_commit(ddt_t *ddt, ddt_log_update_t *dlu)
{
	ASSERT3U(dlu->dlu_dbp, !=, NULL);
	ASSERT3U(dlu->dlu_block+1, ==, dlu->dlu_ndbp);
	ASSERT3U(dlu->dlu_offset, >, 0);

	/*
	 * Close out the last block. Whatever we haven't used will be zeroed,
	 * which matches DLR_INVALID, so we can detect this during load.
	 */
	dmu_buf_fill_done(dlu->dlu_dbp[dlu->dlu_block], dlu->dlu_tx, B_FALSE);

	dmu_buf_rele_array(dlu->dlu_dbp, dlu->dlu_ndbp, FTAG);

	ddt->ddt_log_active->ddl_length +=
	    dlu->dlu_ndbp * (uint64_t)dlu->dlu_dn->dn_datablksz;
	dnode_rele(dlu->dlu_dn, FTAG);

	ddt_log_update_header(ddt, ddt->ddt_log_active, dlu->dlu_tx);

	memset(dlu, 0, sizeof (ddt_log_update_t));

	ddt_log_update_stats(ddt);
}
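
/*
 * Illustrative lifecycle of one log update, pieced together from the
 * functions above (a sketch, not a verbatim caller; the real driver is
 * the dedup sync path outside this file):
 *
 *	ddt_log_update_t dlu;
 *	memset(&dlu, 0, sizeof (dlu));	(dlu_dbp must start out NULL)
 *	ddt_log_begin(ddt, nentries, tx, &dlu);
 *	for each entry to be written:
 *		ddt_log_entry(ddt, &ddlwe, &dlu);
 *	ddt_log_commit(ddt, &dlu);
 */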

boolean_t
ddt_log_take_first(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
{
	ddt_log_entry_t *ddle = avl_first(&ddl->ddl_tree);
	if (ddle == NULL)
		return (B_FALSE);

	DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, ddlwe);

	ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, ddlwe);

	avl_remove(&ddl->ddl_tree, ddle);
	kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
	    ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);

	return (B_TRUE);
}

boolean_t
ddt_log_remove_key(ddt_t *ddt, ddt_log_t *ddl, const ddt_key_t *ddk)
{
	ddt_log_entry_t *ddle = avl_find(&ddl->ddl_tree, ddk, NULL);
	if (ddle == NULL)
		return (B_FALSE);

	ddt_lightweight_entry_t ddlwe;
	DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, &ddlwe);
	ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, &ddlwe);

	avl_remove(&ddl->ddl_tree, ddle);
	kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
	    ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);

	return (B_TRUE);
}

boolean_t
ddt_log_find_key(ddt_t *ddt, const ddt_key_t *ddk,
    ddt_lightweight_entry_t *ddlwe)
{
	ddt_log_entry_t *ddle =
	    avl_find(&ddt->ddt_log_active->ddl_tree, ddk, NULL);
	if (!ddle)
		ddle = avl_find(&ddt->ddt_log_flushing->ddl_tree, ddk, NULL);
	if (!ddle)
		return (B_FALSE);
	if (ddlwe)
		DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, ddlwe);
	return (B_TRUE);
}

void
ddt_log_checkpoint(ddt_t *ddt, ddt_lightweight_entry_t *ddlwe, dmu_tx_t *tx)
{
	ddt_log_t *ddl = ddt->ddt_log_flushing;

	ASSERT3U(ddl->ddl_object, !=, 0);

#ifdef ZFS_DEBUG
	/*
	 * There should not be any entries on the log tree before the given
	 * checkpoint. Assert that this is the case.
	 */
	ddt_log_entry_t *ddle = avl_first(&ddl->ddl_tree);
	if (ddle != NULL)
		VERIFY3U(ddt_key_compare(&ddle->ddle_key, &ddlwe->ddlwe_key),
		    >, 0);
#endif

	ddl->ddl_flags |= DDL_FLAG_CHECKPOINT;
	ddl->ddl_checkpoint = ddlwe->ddlwe_key;
	ddt_log_update_header(ddt, ddl, tx);

	ddt_log_update_stats(ddt);
}
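
/*
 * Note on checkpoint semantics (summarizing behaviour implemented in this
 * file): the checkpoint records the last key known to be fully flushed.
 * On reload, ddt_log_load_entry() skips any record whose key is at or
 * before the checkpoint, so an interrupted flush resumes where it left
 * off instead of re-flushing entries.
 */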

void
ddt_log_truncate(ddt_t *ddt, dmu_tx_t *tx)
{
	ddt_log_t *ddl = ddt->ddt_log_flushing;

	if (ddl->ddl_object == 0)
		return;

	ASSERT(avl_is_empty(&ddl->ddl_tree));

	/* Eject the entire object */
	dmu_free_range(ddt->ddt_os, ddl->ddl_object, 0, DMU_OBJECT_END, tx);

	ddl->ddl_length = 0;
	ddl->ddl_flags &= ~DDL_FLAG_CHECKPOINT;
	memset(&ddl->ddl_checkpoint, 0, sizeof (ddt_key_t));
	ddt_log_update_header(ddt, ddl, tx);

	ddt_log_update_stats(ddt);
}

boolean_t
ddt_log_swap(ddt_t *ddt, dmu_tx_t *tx)
{
	/* Swap the logs. The old flushing one must be empty */
	VERIFY(avl_is_empty(&ddt->ddt_log_flushing->ddl_tree));

	/*
	 * If there are still blocks on the flushing log, truncate it first.
	 * This can happen if there were entries on the flushing log that were
	 * removed in memory via ddt_lookup(); their vestigial remains are
	 * on disk.
	 */
	if (ddt->ddt_log_flushing->ddl_length > 0)
		ddt_log_truncate(ddt, tx);

	/*
	 * Swap policy. We swap the logs (and so begin flushing) when the
	 * active tree grows too large, or when we haven't swapped it in
	 * some amount of time, or if something has requested the logs be
	 * flushed ASAP (see ddt_walk_init()).
	 */

	/*
	 * The log tree is too large if the memory usage of its entries is over
	 * half of the memory limit. This effectively gives each log tree half
	 * the available memory.
	 */
	const boolean_t too_large =
	    (avl_numnodes(&ddt->ddt_log_active->ddl_tree) *
	    DDT_LOG_ENTRY_SIZE(ddt)) >= (zfs_dedup_log_mem_max >> 1);

	const boolean_t too_old =
	    tx->tx_txg >=
	    (ddt->ddt_log_active->ddl_first_txg +
	    MAX(1, zfs_dedup_log_txg_max));

	const boolean_t force =
	    ddt->ddt_log_active->ddl_first_txg <= ddt->ddt_flush_force_txg;

	if (!(too_large || too_old || force))
		return (B_FALSE);

	ddt_log_t *swap = ddt->ddt_log_active;
	ddt->ddt_log_active = ddt->ddt_log_flushing;
	ddt->ddt_log_flushing = swap;

	ASSERT(ddt->ddt_log_active->ddl_flags & DDL_FLAG_FLUSHING);
	ddt->ddt_log_active->ddl_flags &=
	    ~(DDL_FLAG_FLUSHING | DDL_FLAG_CHECKPOINT);

	ASSERT(!(ddt->ddt_log_flushing->ddl_flags & DDL_FLAG_FLUSHING));
	ddt->ddt_log_flushing->ddl_flags |= DDL_FLAG_FLUSHING;

	ddt->ddt_log_active->ddl_first_txg = tx->tx_txg;

	ddt_log_update_header(ddt, ddt->ddt_log_active, tx);
	ddt_log_update_header(ddt, ddt->ddt_log_flushing, tx);

	ddt_log_update_stats(ddt);

	return (B_TRUE);
}
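
/*
 * A worked example of the swap policy above (numbers illustrative): with
 * zfs_dedup_log_mem_max set to 1 GiB, too_large trips once the active
 * tree's entries total 512 MiB or more; independent of size, too_old
 * trips at most zfs_dedup_log_txg_max (default 8) txgs after the active
 * log's ddl_first_txg.
 */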

static inline void
ddt_log_load_entry(ddt_t *ddt, ddt_log_t *ddl, ddt_log_record_t *dlr,
    const ddt_key_t *checkpoint)
{
	ASSERT3U(DLR_GET_TYPE(dlr), ==, DLR_ENTRY);

	ddt_log_record_entry_t *dlre =
	    (ddt_log_record_entry_t *)dlr->dlr_payload;
	if (checkpoint != NULL &&
	    ddt_key_compare(&dlre->dlre_key, checkpoint) <= 0) {
		/* Skip pre-checkpoint entries; they're already flushed. */
		return;
	}

	ddt_lightweight_entry_t ddlwe;
	ddlwe.ddlwe_type = DLR_GET_ENTRY_TYPE(dlr);
	ddlwe.ddlwe_class = DLR_GET_ENTRY_CLASS(dlr);

	ddlwe.ddlwe_key = dlre->dlre_key;
	memcpy(&ddlwe.ddlwe_phys, dlre->dlre_phys, DDT_PHYS_SIZE(ddt));

	ddt_log_update_entry(ddt, ddl, &ddlwe);
}

static void
ddt_log_empty(ddt_t *ddt, ddt_log_t *ddl)
{
	void *cookie = NULL;
	ddt_log_entry_t *ddle;
	IMPLY(ddt->ddt_version == UINT64_MAX, avl_is_empty(&ddl->ddl_tree));
	while ((ddle =
	    avl_destroy_nodes(&ddl->ddl_tree, &cookie)) != NULL) {
		kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
		    ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
	}
	ASSERT(avl_is_empty(&ddl->ddl_tree));
}

static int
ddt_log_load_one(ddt_t *ddt, uint_t n)
{
	ASSERT3U(n, <, 2);

	ddt_log_t *ddl = &ddt->ddt_log[n];

	char name[DDT_NAMELEN];
	ddt_log_name(ddt, name, n);

	uint64_t obj;
	int err = zap_lookup(ddt->ddt_os, ddt->ddt_dir_object, name,
	    sizeof (uint64_t), 1, &obj);
	if (err == ENOENT)
		return (0);
	if (err != 0)
		return (err);

	dnode_t *dn;
	err = dnode_hold(ddt->ddt_os, obj, FTAG, &dn);
	if (err != 0)
		return (err);

	ddt_log_header_t hdr;
	dmu_buf_t *db;
	err = dmu_bonus_hold_by_dnode(dn, FTAG, &db, DMU_READ_NO_PREFETCH);
	if (err != 0) {
		dnode_rele(dn, FTAG);
		return (err);
	}
	memcpy(&hdr, db->db_data, sizeof (ddt_log_header_t));
	dmu_buf_rele(db, FTAG);

	if (DLH_GET_VERSION(&hdr) != 1) {
		dnode_rele(dn, FTAG);
		zfs_dbgmsg("ddt_log_load: spa=%s ddt_log=%s "
		    "unknown version=%llu", spa_name(ddt->ddt_spa), name,
		    (u_longlong_t)DLH_GET_VERSION(&hdr));
		return (SET_ERROR(EINVAL));
	}

	ddt_key_t *checkpoint = NULL;
	if (DLH_GET_FLAGS(&hdr) & DDL_FLAG_CHECKPOINT) {
		/*
		 * If the log has a checkpoint, then we can ignore any entries
		 * that have already been flushed.
		 */
		ASSERT(DLH_GET_FLAGS(&hdr) & DDL_FLAG_FLUSHING);
		checkpoint = &hdr.dlh_checkpoint;
	}

	if (hdr.dlh_length > 0) {
		dmu_prefetch_by_dnode(dn, 0, 0, hdr.dlh_length,
		    ZIO_PRIORITY_SYNC_READ);

		for (uint64_t offset = 0; offset < hdr.dlh_length;
		    offset += dn->dn_datablksz) {
			err = dmu_buf_hold_by_dnode(dn, offset, FTAG, &db,
			    DMU_READ_PREFETCH);
			if (err != 0) {
				dnode_rele(dn, FTAG);
				ddt_log_empty(ddt, ddl);
				return (err);
			}

			uint64_t boffset = 0;
			while (boffset < db->db_size) {
				ddt_log_record_t *dlr =
				    (ddt_log_record_t *)(db->db_data + boffset);

				/* Partially-filled block, skip the rest */
				if (DLR_GET_TYPE(dlr) == DLR_INVALID)
					break;

				switch (DLR_GET_TYPE(dlr)) {
				case DLR_ENTRY:
					ddt_log_load_entry(ddt, ddl, dlr,
					    checkpoint);
					break;

				default:
					dmu_buf_rele(db, FTAG);
					dnode_rele(dn, FTAG);
					ddt_log_empty(ddt, ddl);
					return (SET_ERROR(EINVAL));
				}

				boffset += DLR_GET_RECLEN(dlr);
			}

			dmu_buf_rele(db, FTAG);
		}
	}

	dnode_rele(dn, FTAG);

	ddl->ddl_object = obj;
	ddl->ddl_flags = DLH_GET_FLAGS(&hdr);
	ddl->ddl_length = hdr.dlh_length;
	ddl->ddl_first_txg = hdr.dlh_first_txg;

	if (ddl->ddl_flags & DDL_FLAG_FLUSHING)
		ddt->ddt_log_flushing = ddl;
	else
		ddt->ddt_log_active = ddl;

	return (0);
}

int
ddt_log_load(ddt_t *ddt)
{
	int err;

	if (spa_load_state(ddt->ddt_spa) == SPA_LOAD_TRYIMPORT) {
		/*
		 * The DDT is going to be freed again in a moment, so there's
		 * no point loading the log; it'll just slow down import.
		 */
		return (0);
	}

	ASSERT0(ddt->ddt_log[0].ddl_object);
	ASSERT0(ddt->ddt_log[1].ddl_object);
	if (ddt->ddt_dir_object == 0) {
		/*
		 * If we're configured but the containing dir doesn't exist
		 * yet, then the log object can't possibly exist either.
		 */
		ASSERT3U(ddt->ddt_version, !=, UINT64_MAX);
		return (SET_ERROR(ENOENT));
	}

	if ((err = ddt_log_load_one(ddt, 0)) != 0)
		return (err);
	if ((err = ddt_log_load_one(ddt, 1)) != 0)
		return (err);

	VERIFY3P(ddt->ddt_log_active, !=, ddt->ddt_log_flushing);
	VERIFY(!(ddt->ddt_log_active->ddl_flags & DDL_FLAG_FLUSHING));
	VERIFY(!(ddt->ddt_log_active->ddl_flags & DDL_FLAG_CHECKPOINT));
	VERIFY(ddt->ddt_log_flushing->ddl_flags & DDL_FLAG_FLUSHING);

	/*
	 * We have two finalisation tasks:
	 *
	 * - rebuild the histogram. We do this at the end rather than while
	 *   we're loading so we don't need to uncount and recount entries that
	 *   appear multiple times in the log.
	 *
	 * - remove entries from the flushing tree that are on both trees. This
	 *   happens when ddt_lookup() rehydrates an entry from the flushing
	 *   tree, as ddt_log_remove_key() removes the entry from the in-memory
	 *   tree but doesn't remove it from disk.
	 */

	/*
	 * We don't technically need a config lock here, since there shouldn't
	 * be pool config changes during DDT load. dva_get_dsize_sync() via
	 * ddt_stat_generate() is expecting it though, and it won't hurt
	 * anything, so we take it.
	 */
	spa_config_enter(ddt->ddt_spa, SCL_STATE, FTAG, RW_READER);

	avl_tree_t *al = &ddt->ddt_log_active->ddl_tree;
	avl_tree_t *fl = &ddt->ddt_log_flushing->ddl_tree;
	ddt_log_entry_t *ae = avl_first(al);
	ddt_log_entry_t *fe = avl_first(fl);
	while (ae != NULL || fe != NULL) {
		ddt_log_entry_t *ddle;
		if (ae == NULL) {
			/* active exhausted, take flushing */
			ddle = fe;
			fe = AVL_NEXT(fl, fe);
		} else if (fe == NULL) {
			/* flushing exhausted, take active */
			ddle = ae;
			ae = AVL_NEXT(al, ae);
		} else {
			/* compare active and flushing */
			int c = ddt_key_compare(&ae->ddle_key, &fe->ddle_key);
			if (c < 0) {
				/* active behind, take and advance */
				ddle = ae;
				ae = AVL_NEXT(al, ae);
			} else if (c > 0) {
				/* flushing behind, take and advance */
				ddle = fe;
				fe = AVL_NEXT(fl, fe);
			} else {
				/* match. remove from flushing, take active */
				ddle = fe;
				fe = AVL_NEXT(fl, fe);
				avl_remove(fl, ddle);

				ddle = ae;
				ae = AVL_NEXT(al, ae);
			}
		}

		ddt_lightweight_entry_t ddlwe;
		DDT_LOG_ENTRY_TO_LIGHTWEIGHT(ddt, ddle, &ddlwe);
		ddt_histogram_add_entry(ddt, &ddt->ddt_log_histogram, &ddlwe);
	}

	spa_config_exit(ddt->ddt_spa, SCL_STATE, FTAG);

	ddt_log_update_stats(ddt);

	return (0);
}

void
ddt_log_alloc(ddt_t *ddt)
{
	ASSERT3P(ddt->ddt_log_active, ==, NULL);
	ASSERT3P(ddt->ddt_log_flushing, ==, NULL);

	avl_create(&ddt->ddt_log[0].ddl_tree, ddt_key_compare,
	    sizeof (ddt_log_entry_t), offsetof(ddt_log_entry_t, ddle_node));
	avl_create(&ddt->ddt_log[1].ddl_tree, ddt_key_compare,
	    sizeof (ddt_log_entry_t), offsetof(ddt_log_entry_t, ddle_node));
	ddt->ddt_log_active = &ddt->ddt_log[0];
	ddt->ddt_log_flushing = &ddt->ddt_log[1];
	ddt->ddt_log_flushing->ddl_flags |= DDL_FLAG_FLUSHING;
}

void
ddt_log_free(ddt_t *ddt)
{
	ddt_log_empty(ddt, &ddt->ddt_log[0]);
	ddt_log_empty(ddt, &ddt->ddt_log[1]);
	avl_destroy(&ddt->ddt_log[0].ddl_tree);
	avl_destroy(&ddt->ddt_log[1].ddl_tree);
}

ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, log_txg_max, UINT, ZMOD_RW,
	"Max transactions before starting to flush dedup logs");

ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, log_mem_max, U64, ZMOD_RD,
	"Max memory for dedup logs");

ZFS_MODULE_PARAM(zfs_dedup, zfs_dedup_, log_mem_max_percent, UINT, ZMOD_RD,
	"Max memory for dedup logs, as % of total memory");