xref: /freebsd/sys/contrib/openzfs/module/zfs/vdev_indirect_mapping.c (revision 61145dc2b94f12f6a47344fb9aac702321880e43)
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2015, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zfeature.h>
#include <sys/dmu_objset.h>

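/*
 * Sanity checks on the in-core mapping, used from ASSERT()s below.  In
 * non-debug builds this reduces to B_TRUE.
 */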
#ifdef ZFS_DEBUG
static boolean_t
vdev_indirect_mapping_verify(vdev_indirect_mapping_t *vim)
{
	ASSERT(vim != NULL);

	ASSERT(vim->vim_object != 0);
	ASSERT(vim->vim_objset != NULL);
	ASSERT(vim->vim_phys != NULL);
	ASSERT(vim->vim_dbuf != NULL);

	EQUIV(vim->vim_phys->vimp_num_entries > 0,
	    vim->vim_entries != NULL);
	if (vim->vim_phys->vimp_num_entries > 0) {
		vdev_indirect_mapping_entry_phys_t *last_entry __maybe_unused =
		    &vim->vim_entries[vim->vim_phys->vimp_num_entries - 1];
		uint64_t offset __maybe_unused =
		    DVA_MAPPING_GET_SRC_OFFSET(last_entry);
		uint64_t size __maybe_unused =
		    DVA_GET_ASIZE(&last_entry->vimep_dst);

		ASSERT3U(vim->vim_phys->vimp_max_offset, >=, offset + size);
	}
	if (vim->vim_havecounts) {
		ASSERT(vim->vim_phys->vimp_counts_object != 0);
	}

	return (B_TRUE);
}
#else
#define	vdev_indirect_mapping_verify(vim) ((void) sizeof (vim), B_TRUE)
#endif

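/*
 * Simple accessors for the mapping object number and the fields of its
 * on-disk header (vdev_indirect_mapping_phys_t).
 */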
uint64_t
vdev_indirect_mapping_num_entries(vdev_indirect_mapping_t *vim)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	return (vim->vim_phys->vimp_num_entries);
}

uint64_t
vdev_indirect_mapping_max_offset(vdev_indirect_mapping_t *vim)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	return (vim->vim_phys->vimp_max_offset);
}

uint64_t
vdev_indirect_mapping_object(vdev_indirect_mapping_t *vim)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	return (vim->vim_object);
}

uint64_t
vdev_indirect_mapping_bytes_mapped(vdev_indirect_mapping_t *vim)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	return (vim->vim_phys->vimp_bytes_mapped);
}

/*
 * The length (in bytes) of the mapping object array in memory and
 * (logically) on disk.
 *
 * Note that unlike most of our accessor functions,
 * we don't assert that the struct is consistent; therefore it can be
 * called while there may be concurrent changes, if we don't care about
 * the value being immediately stale (e.g. from spa_removal_get_stats()).
 */
uint64_t
vdev_indirect_mapping_size(vdev_indirect_mapping_t *vim)
{
	return (vim->vim_phys->vimp_num_entries * sizeof (*vim->vim_entries));
}

/*
 * Compare an offset with an indirect mapping entry; there are three
 * possible scenarios:
 *
 *     1. The offset is "less than" the mapping entry; meaning the
 *        offset is less than the source offset of the mapping entry. In
 *        this case, there is no overlap between the offset and the
 *        mapping entry and -1 will be returned.
 *
 *     2. The offset is "greater than" the mapping entry; meaning the
 *        offset is greater than the mapping entry's source offset plus
 *        the entry's size. In this case, there is no overlap between
 *        the offset and the mapping entry and 1 will be returned.
 *
 *        NOTE: If the offset is actually equal to the entry's offset
 *        plus size, this is considered to be "greater" than the entry,
 *        and this case applies (i.e. 1 will be returned). Thus, the
 *        entry's "range" can be considered to be inclusive at its
 *        start, but exclusive at its end: e.g. [src, src + size).
 *
 *     3. The last case to consider is if the offset actually falls
 *        within the mapping entry's range. If this is the case, the
 *        offset is considered to be "equal to" the mapping entry and
 *        0 will be returned.
 *
 *        NOTE: If the offset is equal to the entry's source offset,
 *        this case applies and 0 will be returned. If the offset is
 *        equal to the entry's source plus its size, this case does
 *        *not* apply (see "NOTE" above for scenario 2), and 1 will be
 *        returned.
 */
static int
dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
{
	const uint64_t * const key = v_key;
	const vdev_indirect_mapping_entry_phys_t * const array_elem =
	    v_array_elem;
	uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);

	if (*key < src_offset) {
		return (-1);
	} else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
		return (0);
	} else {
		return (1);
	}
}

/*
 * Returns the mapping entry for the given offset.
 *
 * It's possible that the given offset will not be in the mapping table
 * (i.e. no mapping entries contain this offset), in which case, the
 * return value depends on the "next_if_missing" parameter.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_FALSE, then NULL will always be returned. The behavior is intended
 * to allow consumers to get the entry corresponding to the offset
 * parameter, iff the offset overlaps with an entry in the table.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_TRUE, then the entry nearest to the given offset will be returned,
 * such that the entry's source offset is greater than the offset
 * passed in (i.e. the "next" mapping entry in the table is returned, if
 * the offset is missing from the table). If there are no entries whose
 * source offset is greater than the passed in offset, NULL is returned.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset_impl(vdev_indirect_mapping_t *vim,
    uint64_t offset, boolean_t next_if_missing)
{
	ASSERT(vdev_indirect_mapping_verify(vim));
	ASSERT(vim->vim_phys->vimp_num_entries > 0);

	vdev_indirect_mapping_entry_phys_t *entry = NULL;

	uint64_t last = vim->vim_phys->vimp_num_entries - 1;
	uint64_t base = 0;

	/*
	 * We don't define these inside of the while loop because we use
	 * their value in the case that offset isn't in the mapping.
	 */
	uint64_t mid;
	int result;

	while (last >= base) {
		mid = base + ((last - base) >> 1);

		result = dva_mapping_overlap_compare(&offset,
		    &vim->vim_entries[mid]);

		if (result == 0) {
			entry = &vim->vim_entries[mid];
			break;
		} else if (result < 0) {
			last = mid - 1;
		} else {
			base = mid + 1;
		}
	}

	if (entry == NULL && next_if_missing) {
		ASSERT3U(base, ==, last + 1);
		ASSERT(mid == base || mid == last);
		ASSERT3S(result, !=, 0);

		/*
		 * The offset we're looking for isn't actually contained
		 * in the mapping table, thus we need to return the
		 * closest mapping entry that is greater than the
		 * offset. We reuse the result of the last comparison,
		 * comparing the mapping entry at index "mid" and the
		 * offset. The offset is guaranteed to lie between
		 * indices one less than "mid", and one greater than
		 * "mid"; we just need to determine if offset is greater
		 * than, or less than the mapping entry contained at
		 * index "mid".
		 */

		uint64_t index;
		if (result < 0)
			index = mid;
		else
			index = mid + 1;

		ASSERT3U(index, <=, vim->vim_phys->vimp_num_entries);

		if (index == vim->vim_phys->vimp_num_entries) {
			/*
			 * If "index" is past the end of the entries
			 * array, then not only is the offset not in the
			 * mapping table, but it's actually greater than
			 * all entries in the table. In this case, we
			 * can't return a mapping entry greater than the
			 * offset (since none exist), so we return NULL.
			 */

			ASSERT3S(dva_mapping_overlap_compare(&offset,
			    &vim->vim_entries[index - 1]), >, 0);

			return (NULL);
		} else {
			/*
			 * Just to be safe, we verify the offset falls
			 * in between the mapping entries at index and
			 * one less than index. Since we know the offset
			 * doesn't overlap an entry, and we're supposed
			 * to return the entry just greater than the
			 * offset, both of the following tests must be
			 * true.
			 */
			ASSERT3S(dva_mapping_overlap_compare(&offset,
			    &vim->vim_entries[index]), <, 0);
			IMPLY(index >= 1, dva_mapping_overlap_compare(&offset,
			    &vim->vim_entries[index - 1]) > 0);

			return (&vim->vim_entries[index]);
		}
	} else {
		return (entry);
	}
}

vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
    uint64_t offset)
{
	return (vdev_indirect_mapping_entry_for_offset_impl(vim, offset,
	    B_FALSE));
}

vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset_or_next(vdev_indirect_mapping_t *vim,
    uint64_t offset)
{
	return (vdev_indirect_mapping_entry_for_offset_impl(vim, offset,
	    B_TRUE));
}

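/*
 * Release an in-core mapping: free the entry array (if any), drop the hold
 * on the bonus buffer, and free the vdev_indirect_mapping_t itself.
 */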
void
vdev_indirect_mapping_close(vdev_indirect_mapping_t *vim)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	if (vim->vim_phys->vimp_num_entries > 0) {
		uint64_t map_size = vdev_indirect_mapping_size(vim);
		vmem_free(vim->vim_entries, map_size);
		vim->vim_entries = NULL;
	}

	dmu_buf_rele(vim->vim_dbuf, vim);

	vim->vim_objset = NULL;
	vim->vim_object = 0;
	vim->vim_dbuf = NULL;
	vim->vim_phys = NULL;

	kmem_free(vim, sizeof (*vim));
}

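/*
 * Allocate a new mapping object, and, if the obsolete counts feature is
 * enabled, a parallel counts object as well.  Returns the object number
 * of the new mapping object.
 */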
uint64_t
vdev_indirect_mapping_alloc(objset_t *os, dmu_tx_t *tx)
{
	uint64_t object;
	ASSERT(dmu_tx_is_syncing(tx));
	uint64_t bonus_size = VDEV_INDIRECT_MAPPING_SIZE_V0;

	if (spa_feature_is_enabled(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		bonus_size = sizeof (vdev_indirect_mapping_phys_t);
	}

	object = dmu_object_alloc(os,
	    DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
	    DMU_OTN_UINT64_METADATA, bonus_size,
	    tx);

	if (spa_feature_is_enabled(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		dmu_buf_t *dbuf;
		vdev_indirect_mapping_phys_t *vimp;

		VERIFY0(dmu_bonus_hold(os, object, FTAG, &dbuf));
		dmu_buf_will_dirty(dbuf, tx);
		vimp = dbuf->db_data;
		vimp->vimp_counts_object = dmu_object_alloc(os,
		    DMU_OTN_UINT32_METADATA, SPA_OLD_MAXBLOCKSIZE,
		    DMU_OT_NONE, 0, tx);
		spa_feature_incr(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		dmu_buf_rele(dbuf, FTAG);
	}

	return (object);
}


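/*
 * Open an existing mapping object: hold its bonus buffer and read the
 * entire entry array into memory.
 */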
vdev_indirect_mapping_t *
vdev_indirect_mapping_open(objset_t *os, uint64_t mapping_object)
{
	vdev_indirect_mapping_t *vim = kmem_zalloc(sizeof (*vim), KM_SLEEP);
	dmu_object_info_t doi;
	VERIFY0(dmu_object_info(os, mapping_object, &doi));

	vim->vim_objset = os;
	vim->vim_object = mapping_object;

	VERIFY0(dmu_bonus_hold(os, vim->vim_object, vim,
	    &vim->vim_dbuf));
	vim->vim_phys = vim->vim_dbuf->db_data;

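	/*
	 * A bonus buffer larger than the original (V0) layout indicates
	 * that this mapping was created with an obsolete counts object.
	 */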
	vim->vim_havecounts =
	    (doi.doi_bonus_size > VDEV_INDIRECT_MAPPING_SIZE_V0);

	if (vim->vim_phys->vimp_num_entries > 0) {
		uint64_t map_size = vdev_indirect_mapping_size(vim);
		vim->vim_entries = vmem_alloc(map_size, KM_SLEEP);
		VERIFY0(dmu_read(os, vim->vim_object, 0, map_size,
		    vim->vim_entries, DMU_READ_PREFETCH));
	}

	ASSERT(vdev_indirect_mapping_verify(vim));

	return (vim);
}

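/*
 * Destroy the on-disk mapping object (and its counts object, if present),
 * decrementing the obsolete counts feature refcount as needed.
 */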
void
vdev_indirect_mapping_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	vdev_indirect_mapping_t *vim = vdev_indirect_mapping_open(os, object);
	if (vim->vim_havecounts) {
		VERIFY0(dmu_object_free(os, vim->vim_phys->vimp_counts_object,
		    tx));
		spa_feature_decr(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	}
	vdev_indirect_mapping_close(vim);

	VERIFY0(dmu_object_free(os, object, tx));
}

/*
 * Append the list of vdev_indirect_mapping_entry_t's to the on-disk
 * mapping object.  Also remove the entries from the list and free them.
 * This also implicitly extends the max_offset of the mapping (to the end
 * of the last entry).
 */
void
vdev_indirect_mapping_add_entries(vdev_indirect_mapping_t *vim,
    list_t *list, dmu_tx_t *tx)
{
	vdev_indirect_mapping_entry_phys_t *mapbuf;
	uint64_t old_size;
	uint32_t *countbuf = NULL;
	vdev_indirect_mapping_entry_phys_t *old_entries;
	uint64_t old_count;
	uint64_t entries_written = 0;

	ASSERT(vdev_indirect_mapping_verify(vim));
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dsl_pool_sync_context(dmu_tx_pool(tx)));
	ASSERT(!list_is_empty(list));

	old_size = vdev_indirect_mapping_size(vim);
	old_entries = vim->vim_entries;
	old_count = vim->vim_phys->vimp_num_entries;

	dmu_buf_will_dirty(vim->vim_dbuf, tx);

	mapbuf = vmem_alloc(SPA_OLD_MAXBLOCKSIZE, KM_SLEEP);
	if (vim->vim_havecounts) {
		countbuf = vmem_alloc(SPA_OLD_MAXBLOCKSIZE, KM_SLEEP);
		ASSERT(spa_feature_is_active(vim->vim_objset->os_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));
	}
	while (!list_is_empty(list)) {
		uint64_t i;
		/*
		 * Write entries from the list to the
		 * vdev_im_object in batches of size SPA_OLD_MAXBLOCKSIZE.
		 */
		for (i = 0; i < SPA_OLD_MAXBLOCKSIZE / sizeof (*mapbuf); i++) {
			vdev_indirect_mapping_entry_t *entry =
			    list_remove_head(list);
			if (entry == NULL)
				break;

			uint64_t size =
			    DVA_GET_ASIZE(&entry->vime_mapping.vimep_dst);
			uint64_t src_offset =
			    DVA_MAPPING_GET_SRC_OFFSET(&entry->vime_mapping);

			/*
			 * We shouldn't be adding an entry which is fully
			 * obsolete.
			 */
			ASSERT3U(entry->vime_obsolete_count, <, size);
			IMPLY(entry->vime_obsolete_count != 0,
			    vim->vim_havecounts);

			mapbuf[i] = entry->vime_mapping;
			if (vim->vim_havecounts)
				countbuf[i] = entry->vime_obsolete_count;

			vim->vim_phys->vimp_bytes_mapped += size;
			ASSERT3U(src_offset, >=,
			    vim->vim_phys->vimp_max_offset);
			vim->vim_phys->vimp_max_offset = src_offset + size;

			entries_written++;

			vmem_free(entry, sizeof (*entry));
		}
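		/*
		 * Append this batch just past the entries already written;
		 * the counts object is kept parallel to the mapping object.
		 */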
		dmu_write(vim->vim_objset, vim->vim_object,
		    vim->vim_phys->vimp_num_entries * sizeof (*mapbuf),
		    i * sizeof (*mapbuf),
		    mapbuf, tx);
		if (vim->vim_havecounts) {
			dmu_write(vim->vim_objset,
			    vim->vim_phys->vimp_counts_object,
			    vim->vim_phys->vimp_num_entries *
			    sizeof (*countbuf),
			    i * sizeof (*countbuf), countbuf, tx);
		}
		vim->vim_phys->vimp_num_entries += i;
	}
	vmem_free(mapbuf, SPA_OLD_MAXBLOCKSIZE);
	if (vim->vim_havecounts)
		vmem_free(countbuf, SPA_OLD_MAXBLOCKSIZE);

	/*
	 * Update the entry array to reflect the new entries. First, copy
	 * over any old entries then read back the new entries we just wrote.
	 */
	uint64_t new_size = vdev_indirect_mapping_size(vim);
	ASSERT3U(new_size, >, old_size);
	ASSERT3U(new_size - old_size, ==,
	    entries_written * sizeof (vdev_indirect_mapping_entry_phys_t));
	vim->vim_entries = vmem_alloc(new_size, KM_SLEEP);
	if (old_size > 0) {
		memcpy(vim->vim_entries, old_entries, old_size);
		vmem_free(old_entries, old_size);
	}
	VERIFY0(dmu_read(vim->vim_objset, vim->vim_object, old_size,
	    new_size - old_size, &vim->vim_entries[old_count],
	    DMU_READ_PREFETCH));

	zfs_dbgmsg("txg %llu: wrote %llu entries to "
	    "indirect mapping obj %llu; max offset=0x%llx",
	    (u_longlong_t)dmu_tx_get_txg(tx),
	    (u_longlong_t)entries_written,
	    (u_longlong_t)vim->vim_object,
	    (u_longlong_t)vim->vim_phys->vimp_max_offset);
}

/*
 * Increment the relevant counts for the specified offset and length.
 * The counts array must be obtained from
 * vdev_indirect_mapping_load_obsolete_counts().
 */
void
vdev_indirect_mapping_increment_obsolete_count(vdev_indirect_mapping_t *vim,
    uint64_t offset, uint64_t length, uint32_t *counts)
{
	vdev_indirect_mapping_entry_phys_t *mapping;
	uint64_t index;

	mapping = vdev_indirect_mapping_entry_for_offset(vim, offset);

	ASSERT(length > 0);
	ASSERT3P(mapping, !=, NULL);

	index = mapping - vim->vim_entries;

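	/*
	 * The [offset, offset + length) range may span several consecutive
	 * mapping entries; charge each entry for the portion of the range
	 * that falls within it.
	 */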
	while (length > 0) {
		ASSERT3U(index, <, vdev_indirect_mapping_num_entries(vim));

		uint64_t size = DVA_GET_ASIZE(&mapping->vimep_dst);
		uint64_t inner_offset = offset -
		    DVA_MAPPING_GET_SRC_OFFSET(mapping);
		VERIFY3U(inner_offset, <, size);
		uint64_t inner_size = MIN(length, size - inner_offset);

		VERIFY3U(counts[index] + inner_size, <=, size);
		counts[index] += inner_size;

		offset += inner_size;
		length -= inner_size;
		mapping++;
		index++;
	}
}

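/*
 * Argument cookie for load_obsolete_sm_callback().
 */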
typedef struct load_obsolete_space_map_arg {
	vdev_indirect_mapping_t	*losma_vim;
	uint32_t		*losma_counts;
} load_obsolete_space_map_arg_t;

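/*
 * space_map_iterate() callback: apply each ALLOC entry of the obsolete
 * space map to the in-memory obsolete counts.
 */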
static int
load_obsolete_sm_callback(space_map_entry_t *sme, void *arg)
{
	load_obsolete_space_map_arg_t *losma = arg;
	ASSERT3S(sme->sme_type, ==, SM_ALLOC);

	vdev_indirect_mapping_increment_obsolete_count(losma->losma_vim,
	    sme->sme_offset, sme->sme_run, losma->losma_counts);

	return (0);
}

/*
 * Modify the counts (increment them) based on the spacemap.
 */
void
vdev_indirect_mapping_load_obsolete_spacemap(vdev_indirect_mapping_t *vim,
    uint32_t *counts, space_map_t *obsolete_space_sm)
{
	load_obsolete_space_map_arg_t losma;
	losma.losma_counts = counts;
	losma.losma_vim = vim;
	VERIFY0(space_map_iterate(obsolete_space_sm,
	    space_map_length(obsolete_space_sm),
	    load_obsolete_sm_callback, &losma));
}

/*
 * Read the obsolete counts from disk, returning them in an array.
 */
uint32_t *
vdev_indirect_mapping_load_obsolete_counts(vdev_indirect_mapping_t *vim)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	uint64_t counts_size =
	    vim->vim_phys->vimp_num_entries * sizeof (uint32_t);
	uint32_t *counts = vmem_alloc(counts_size, KM_SLEEP);
	if (vim->vim_havecounts) {
		VERIFY0(dmu_read(vim->vim_objset,
		    vim->vim_phys->vimp_counts_object,
		    0, counts_size,
		    counts, DMU_READ_PREFETCH));
	} else {
		memset(counts, 0, counts_size);
	}
	return (counts);
}

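/*
 * Free a counts array returned by
 * vdev_indirect_mapping_load_obsolete_counts().
 */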
extern void
vdev_indirect_mapping_free_obsolete_counts(vdev_indirect_mapping_t *vim,
    uint32_t *counts)
{
	ASSERT(vdev_indirect_mapping_verify(vim));

	vmem_free(counts, vim->vim_phys->vimp_num_entries * sizeof (uint32_t));
}

#if defined(_KERNEL)
EXPORT_SYMBOL(vdev_indirect_mapping_add_entries);
EXPORT_SYMBOL(vdev_indirect_mapping_alloc);
EXPORT_SYMBOL(vdev_indirect_mapping_bytes_mapped);
EXPORT_SYMBOL(vdev_indirect_mapping_close);
EXPORT_SYMBOL(vdev_indirect_mapping_entry_for_offset);
EXPORT_SYMBOL(vdev_indirect_mapping_entry_for_offset_or_next);
EXPORT_SYMBOL(vdev_indirect_mapping_free);
EXPORT_SYMBOL(vdev_indirect_mapping_free_obsolete_counts);
EXPORT_SYMBOL(vdev_indirect_mapping_increment_obsolete_count);
EXPORT_SYMBOL(vdev_indirect_mapping_load_obsolete_counts);
EXPORT_SYMBOL(vdev_indirect_mapping_load_obsolete_spacemap);
EXPORT_SYMBOL(vdev_indirect_mapping_max_offset);
EXPORT_SYMBOL(vdev_indirect_mapping_num_entries);
EXPORT_SYMBOL(vdev_indirect_mapping_object);
EXPORT_SYMBOL(vdev_indirect_mapping_open);
EXPORT_SYMBOL(vdev_indirect_mapping_size);
#endif
620