xref: /illumos-gate/usr/src/uts/common/fs/zfs/space_map.c (revision bbbed746ed07daa0e18f08cf6145bb3f2b063f24)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
26  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
27  */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/spa.h>
31 #include <sys/dmu.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/dnode.h>
34 #include <sys/dsl_pool.h>
35 #include <sys/zio.h>
36 #include <sys/space_map.h>
37 #include <sys/refcount.h>
38 #include <sys/zfeature.h>
39 
40 /*
41  * Note on space map block size:
42  *
43  * The data for a given space map can be kept on blocks of any size.
44  * Larger blocks entail fewer i/o operations, but they also cause the
45  * DMU to keep more data in-core, and also to waste more i/o bandwidth
46  * when only a few blocks have changed since the last transaction group.
47  */
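
/*
 * Editorial illustration, not part of the original source, making the
 * tradeoff above concrete: every space map entry below is a single
 * uint64_t, so a 4K block holds 4096 / 8 = 512 entries while a 128K
 * block holds 16384.  Larger blocks amortize i/o over more entries, but
 * appending a few entries dirties, and later rewrites, a bigger block.
 */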
48 
49 /*
50  * Iterate through the space map, invoking the callback on each (non-debug)
51  * space map entry.
52  */
53 int
54 space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
55 {
56 	uint64_t *entry, *entry_map, *entry_map_end;
57 	uint64_t bufsize, size, offset, end;
58 	int error = 0;
59 
60 	end = space_map_length(sm);
61 
62 	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
63 	entry_map = zio_buf_alloc(bufsize);
64 
65 	if (end > bufsize) {
66 		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
67 		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
68 	}
69 
70 	for (offset = 0; offset < end && error == 0; offset += bufsize) {
71 		size = MIN(end - offset, bufsize);
72 		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
73 		VERIFY(size != 0);
74 		ASSERT3U(sm->sm_blksz, !=, 0);
75 
76 		dprintf("object=%llu  offset=%llx  size=%llx\n",
77 		    space_map_object(sm), offset, size);
78 
79 		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
80 		    entry_map, DMU_READ_PREFETCH);
81 		if (error != 0)
82 			break;
83 
84 		entry_map_end = entry_map + (size / sizeof (uint64_t));
85 		for (entry = entry_map; entry < entry_map_end && error == 0;
86 		    entry++) {
87 			uint64_t e = *entry;
88 			uint64_t offset, size;
89 
90 			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
91 				continue;
92 
93 			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
94 			    sm->sm_start;
95 			size = SM_RUN_DECODE(e) << sm->sm_shift;
96 
97 			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
98 			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
99 			VERIFY3U(offset, >=, sm->sm_start);
100 			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
101 			error = callback(SM_TYPE_DECODE(e), offset, size, arg);
102 		}
103 	}
104 
105 	zio_buf_free(entry_map, bufsize);
106 	return (error);
107 }
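
/*
 * Editorial sketch, not part of the original source: a minimal sm_cb_t
 * callback plus the way a caller might drive space_map_iterate() with
 * it.  The names sm_tally_t, sm_tally_cb, and the locals are
 * hypothetical; the callback signature mirrors the invocation above.
 */
#if 0
typedef struct sm_tally {
	uint64_t	smt_alloc;	/* bytes recorded as allocated */
	uint64_t	smt_free;	/* bytes recorded as freed */
} sm_tally_t;

static int
sm_tally_cb(maptype_t type, uint64_t offset, uint64_t size, void *arg)
{
	sm_tally_t *smt = arg;

	if (type == SM_ALLOC)
		smt->smt_alloc += size;
	else
		smt->smt_free += size;

	return (0);	/* a nonzero return stops the iteration */
}

/*
 * Example use:
 *	sm_tally_t tally = { 0 };
 *	int err = space_map_iterate(sm, sm_tally_cb, &tally);
 */
#endif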
108 
109 /*
110  * Note: This function performs destructive actions; specifically,
111  * it deletes entries from the end of the space map. Thus, callers
112  * should ensure that they are holding the appropriate locks for
113  * the space map that they provide.
114  */
115 int
116 space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
117     dmu_tx_t *tx)
118 {
119 	uint64_t bufsize, len;
120 	uint64_t *entry_map;
121 	int error = 0;
122 
123 	len = space_map_length(sm);
124 	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
125 	entry_map = zio_buf_alloc(bufsize);
126 
127 	dmu_buf_will_dirty(sm->sm_dbuf, tx);
128 
129 	/*
130 	 * Since we can't move the starting offset of the space map
131 	 * (e.g. there are on-disk references pointing to it), we destroy
132 	 * its entries incrementally, starting from the end.
133 	 *
134 	 * The logic that follows is basically the same as the one used
135 	 * in space_map_iterate() but it traverses the space map
136 	 * backwards:
137 	 *
138 	 * 1] We figure out how much data we want to read from the
139 	 *    on-disk space map into our buffer.
140 	 * 2] We figure out the offset at the end of the space map where
141 	 *    we will start reading entries into our buffer.
142 	 * 3] We read the on-disk entries into the buffer.
143 	 * 4] We iterate over the entries from end to beginning, calling
144 	 *    the callback function on each one. As we move from entry
145 	 *    to entry we decrease the length of the space map, effectively
146 	 *    deleting each entry.
147 	 * 5] If there are no more entries in the space map or the
148 	 *    callback returns a value other than 0, we stop iterating
149 	 *    over the space map. If there are entries remaining and
150 	 * the callback returned zero, we go back to step [1].
151 	 */
152 	uint64_t offset = 0, size = 0;
153 	while (len > 0 && error == 0) {
154 		size = MIN(bufsize, len);
155 
156 		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
157 		VERIFY3U(size, >, 0);
158 		ASSERT3U(sm->sm_blksz, !=, 0);
159 
160 		offset = len - size;
161 
162 		IMPLY(bufsize > len, offset == 0);
163 		IMPLY(bufsize == len, offset == 0);
164 		IMPLY(bufsize < len, offset > 0);
165 
167 		EQUIV(size == len, offset == 0);
168 		IMPLY(size < len, bufsize < len);
169 
170 		dprintf("object=%llu  offset=%llx  size=%llx\n",
171 		    space_map_object(sm), offset, size);
172 
173 		error = dmu_read(sm->sm_os, space_map_object(sm),
174 		    offset, size, entry_map, DMU_READ_PREFETCH);
175 		if (error != 0)
176 			break;
177 
178 		uint64_t num_entries = size / sizeof (uint64_t);
179 
180 		ASSERT3U(num_entries, >, 0);
181 
182 		while (num_entries > 0) {
183 			uint64_t e, entry_offset, entry_size;
184 			maptype_t type;
185 
186 			e = entry_map[num_entries - 1];
187 
188 			ASSERT3U(num_entries, >, 0);
189 			ASSERT0(error);
190 
191 			if (SM_DEBUG_DECODE(e)) {
192 				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
193 				space_map_update(sm);
194 				len -= sizeof (uint64_t);
195 				num_entries--;
196 				continue;
197 			}
198 
199 			type = SM_TYPE_DECODE(e);
200 			entry_offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
201 			    sm->sm_start;
202 			entry_size = SM_RUN_DECODE(e) << sm->sm_shift;
203 
204 			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
205 			VERIFY0(P2PHASE(entry_size, 1ULL << sm->sm_shift));
206 			VERIFY3U(entry_offset, >=, sm->sm_start);
207 			VERIFY3U(entry_offset + entry_size, <=,
208 			    sm->sm_start + sm->sm_size);
209 
210 			error = callback(type, entry_offset, entry_size, arg);
211 			if (error != 0)
212 				break;
213 
214 			if (type == SM_ALLOC)
215 				sm->sm_phys->smp_alloc -= entry_size;
216 			else
217 				sm->sm_phys->smp_alloc += entry_size;
218 
219 			sm->sm_phys->smp_objsize -= sizeof (uint64_t);
220 			space_map_update(sm);
221 			len -= sizeof (uint64_t);
222 			num_entries--;
223 		}
224 		IMPLY(error == 0, num_entries == 0);
225 		EQUIV(offset == 0 && error == 0, len == 0 && num_entries == 0);
226 	}
227 
228 	if (len == 0) {
229 		ASSERT0(error);
230 		ASSERT0(offset);
231 		ASSERT0(sm->sm_length);
232 		ASSERT0(sm->sm_phys->smp_objsize);
233 		ASSERT0(sm->sm_alloc);
234 	}
235 
236 	zio_buf_free(entry_map, bufsize);
237 	return (error);
238 }
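
/*
 * Editorial sketch, not part of the original source: one possible
 * destroy callback, which simply collects the SM_FREE segments being
 * destroyed into a caller-supplied range tree for later processing.
 * The name smid_collect_free_cb is hypothetical, and the sketch assumes
 * each range appears at most once in the space map (e.g. the map has
 * been condensed).
 */
#if 0
static int
smid_collect_free_cb(maptype_t type, uint64_t offset, uint64_t size,
    void *arg)
{
	range_tree_t *rt = arg;

	if (type == SM_FREE)
		range_tree_add(rt, offset, size);

	return (0);
}
#endif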
239 
240 typedef struct space_map_load_arg {
241 	space_map_t	*smla_sm;
242 	range_tree_t	*smla_rt;
243 	maptype_t	smla_type;
244 } space_map_load_arg_t;
245 
246 static int
247 space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
248     void *arg)
249 {
250 	space_map_load_arg_t *smla = arg;
251 	if (type == smla->smla_type) {
252 		VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
253 		    smla->smla_sm->sm_size);
254 		range_tree_add(smla->smla_rt, offset, size);
255 	} else {
256 		range_tree_remove(smla->smla_rt, offset, size);
257 	}
258 
259 	return (0);
260 }
261 
262 /*
263  * Load the space map from disk into the specified range tree. Segments of
264  * maptype are added to the range tree; all other segment types are removed.
265  */
266 int
267 space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
268 {
269 	uint64_t space;
270 	int err;
271 	space_map_load_arg_t smla;
272 
273 	VERIFY0(range_tree_space(rt));
274 	space = space_map_allocated(sm);
275 
276 	if (maptype == SM_FREE) {
277 		range_tree_add(rt, sm->sm_start, sm->sm_size);
278 		space = sm->sm_size - space;
279 	}
280 
281 	smla.smla_rt = rt;
282 	smla.smla_sm = sm;
283 	smla.smla_type = maptype;
284 	err = space_map_iterate(sm, space_map_load_callback, &smla);
285 
286 	if (err == 0) {
287 		VERIFY3U(range_tree_space(rt), ==, space);
288 	} else {
289 		range_tree_vacate(rt, NULL, NULL);
290 	}
291 
292 	return (err);
293 }
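
/*
 * Editorial illustration, not part of the original source.  Worked
 * example of the accounting above: for a space map covering 10GB with
 * 3GB recorded as allocated, loading with SM_FREE first adds the whole
 * 10GB range to the tree and expects 10GB - 3GB = 7GB to remain once
 * the SM_ALLOC segments have been removed; loading with SM_ALLOC
 * expects the tree to end up holding exactly the 3GB of allocated
 * segments.
 */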
294 
295 void
296 space_map_histogram_clear(space_map_t *sm)
297 {
298 	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
299 		return;
300 
301 	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
302 }
303 
304 boolean_t
305 space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
306 {
307 	/*
308 	 * Verify that the in-core range tree does not have any
309 	 * ranges smaller than our minimum allocation size of 2^sm_shift.
310 	 */
311 	for (int i = 0; i < sm->sm_shift; i++) {
312 		if (rt->rt_histogram[i] != 0)
313 			return (B_FALSE);
314 	}
315 	return (B_TRUE);
316 }
317 
318 void
319 space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
320 {
321 	int idx = 0;
322 
323 	ASSERT(dmu_tx_is_syncing(tx));
324 	VERIFY3U(space_map_object(sm), !=, 0);
325 
326 	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
327 		return;
328 
329 	dmu_buf_will_dirty(sm->sm_dbuf, tx);
330 
331 	ASSERT(space_map_histogram_verify(sm, rt));
332 	/*
333 	 * Transfer the content of the range tree histogram to the space
334 	 * map histogram. The space map histogram contains 32 buckets ranging
335 	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
336 	 * however, can represent ranges from 2^0 to 2^63. Since the space
337 	 * map only cares about allocatable blocks (minimum of 2^sm_shift) we
338 	 * can safely ignore all ranges in the range tree smaller than that.
339 	 */
340 	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
341 
342 		/*
343 		 * Since the largest histogram bucket in the space map is
344 		 * 2^(32+sm_shift-1), we need to normalize the values in
345 		 * the range tree for any bucket larger than that size. For
346 		 * example, given an sm_shift of 9, ranges larger than 2^40
347 		 * would get normalized as if they were 1TB ranges. If the
348 		 * range tree had a count of 5 in the 2^44 (16TB) bucket, the
349 		 * calculation below would add 5 * 2^4 = 80 to the 1TB bucket.
350 		 */
351 		ASSERT3U(i, >=, idx + sm->sm_shift);
352 		sm->sm_phys->smp_histogram[idx] +=
353 		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);
354 
355 		/*
356 		 * Increment the space map's index as long as we haven't
357 		 * reached the maximum bucket size. Accumulate all ranges
358 		 * larger than the max bucket size into the last bucket.
359 		 */
360 		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
361 			ASSERT3U(idx + sm->sm_shift, ==, i);
362 			idx++;
363 			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
364 		}
365 	}
366 }
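
/*
 * Editorial illustration, not part of the original source.  Concretely,
 * with sm_shift = 9 the loop above maps range tree bucket i (segments
 * of size 2^i) to space map bucket MIN(i - 9, 31): buckets 2^9 through
 * 2^40 transfer one-to-one, and every larger bucket collapses into the
 * 2^40 (1TB) bucket after being scaled by 2^(i - 31 - 9) as described
 * in the comment above.
 */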
367 
368 uint64_t
369 space_map_entries(space_map_t *sm, range_tree_t *rt)
370 {
371 	avl_tree_t *t = &rt->rt_root;
372 	range_seg_t *rs;
373 	uint64_t size, entries;
374 
375 	/*
376 	 * Each space map write adds a debug entry, so account for it here.
377 	 */
378 	entries = 1;
379 
380 	/*
381 	 * Traverse the range tree and calculate the number of space map
382 	 * entries that would be required to write out the range tree.
383 	 */
384 	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
385 		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
386 		entries += howmany(size, SM_RUN_MAX);
387 	}
388 	return (entries);
389 }
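
/*
 * Editorial illustration, not part of the original source.  For example,
 * with sm_shift = 9 a 64MB segment spans 64MB / 512 = 131072 units.
 * Assuming SM_RUN_MAX is 32768 units (the one-word entry format's
 * 15-bit run field; an assumption not spelled out in this file), that
 * segment costs howmany(131072, 32768) = 4 entries, on top of the one
 * debug entry counted above.
 */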
390 
391 void
392 space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
393     dmu_tx_t *tx)
394 {
395 	objset_t *os = sm->sm_os;
396 	spa_t *spa = dmu_objset_spa(os);
397 	avl_tree_t *t = &rt->rt_root;
398 	range_seg_t *rs;
399 	uint64_t size, total, rt_space, nodes;
400 	uint64_t *entry, *entry_map, *entry_map_end;
401 	uint64_t expected_entries, actual_entries = 1;
402 
403 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
404 	VERIFY3U(space_map_object(sm), !=, 0);
405 	dmu_buf_will_dirty(sm->sm_dbuf, tx);
406 
407 	/*
408 	 * This field is no longer necessary since the in-core space map
409 	 * now contains the object number, but it is maintained for backwards
410 	 * compatibility.
411 	 */
412 	sm->sm_phys->smp_object = sm->sm_object;
413 
414 	if (range_tree_is_empty(rt)) {
415 		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
416 		return;
417 	}
418 
419 	if (maptype == SM_ALLOC)
420 		sm->sm_phys->smp_alloc += range_tree_space(rt);
421 	else
422 		sm->sm_phys->smp_alloc -= range_tree_space(rt);
423 
424 	expected_entries = space_map_entries(sm, rt);
425 
426 	entry_map = zio_buf_alloc(sm->sm_blksz);
427 	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
428 	entry = entry_map;
429 
430 	*entry++ = SM_DEBUG_ENCODE(1) |
431 	    SM_DEBUG_ACTION_ENCODE(maptype) |
432 	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
433 	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
434 
435 	total = 0;
436 	nodes = avl_numnodes(&rt->rt_root);
437 	rt_space = range_tree_space(rt);
438 	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
439 		uint64_t start;
440 
441 		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
442 		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
443 
444 		total += size << sm->sm_shift;
445 
446 		while (size != 0) {
447 			uint64_t run_len;
448 
449 			run_len = MIN(size, SM_RUN_MAX);
450 
451 			if (entry == entry_map_end) {
452 				dmu_write(os, space_map_object(sm),
453 				    sm->sm_phys->smp_objsize, sm->sm_blksz,
454 				    entry_map, tx);
455 				sm->sm_phys->smp_objsize += sm->sm_blksz;
456 				entry = entry_map;
457 			}
458 
459 			*entry++ = SM_OFFSET_ENCODE(start) |
460 			    SM_TYPE_ENCODE(maptype) |
461 			    SM_RUN_ENCODE(run_len);
462 
463 			start += run_len;
464 			size -= run_len;
465 			actual_entries++;
466 		}
467 	}
468 
469 	if (entry != entry_map) {
470 		size = (entry - entry_map) * sizeof (uint64_t);
471 		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
472 		    size, entry_map, tx);
473 		sm->sm_phys->smp_objsize += size;
474 	}
475 	ASSERT3U(expected_entries, ==, actual_entries);
476 
477 	/*
478 	 * Ensure that the space_map's accounting wasn't changed
479 	 * while we were in the middle of writing it out.
480 	 */
481 	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
482 	VERIFY3U(range_tree_space(rt), ==, rt_space);
483 	VERIFY3U(range_tree_space(rt), ==, total);
484 
485 	zio_buf_free(entry_map, sm->sm_blksz);
486 }
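
/*
 * Editorial sketch, not part of the original source: roughly how a
 * consumer might persist an in-core range tree of freed segments with
 * the function above.  The function and variable names are
 * hypothetical; the caller is assumed to hold the appropriate locks and
 * to be in syncing context, which space_map_write() asserts via
 * dsl_pool_sync_context().
 */
#if 0
static void
space_map_write_frees_example(space_map_t *sm, range_tree_t *frees_rt,
    dmu_tx_t *tx)
{
	/* Append the tree's segments to the space map as SM_FREE records. */
	space_map_write(sm, frees_rt, SM_FREE, tx);

	/* The segments are now recorded in the space map; empty the tree. */
	range_tree_vacate(frees_rt, NULL, NULL);

	/*
	 * Once this txg's changes are on disk, space_map_update() would
	 * fold the new smp_alloc/smp_objsize into sm_alloc/sm_length.
	 */
}
#endif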
487 
488 static int
489 space_map_open_impl(space_map_t *sm)
490 {
491 	int error;
492 	u_longlong_t blocks;
493 
494 	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
495 	if (error)
496 		return (error);
497 
498 	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
499 	sm->sm_phys = sm->sm_dbuf->db_data;
500 	return (0);
501 }
502 
503 int
504 space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
505     uint64_t start, uint64_t size, uint8_t shift)
506 {
507 	space_map_t *sm;
508 	int error;
509 
510 	ASSERT(*smp == NULL);
511 	ASSERT(os != NULL);
512 	ASSERT(object != 0);
513 
514 	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
515 
516 	sm->sm_start = start;
517 	sm->sm_size = size;
518 	sm->sm_shift = shift;
519 	sm->sm_os = os;
520 	sm->sm_object = object;
521 
522 	error = space_map_open_impl(sm);
523 	if (error != 0) {
524 		space_map_close(sm);
525 		return (error);
526 	}
527 
528 	*smp = sm;
529 
530 	return (0);
531 }
532 
533 void
534 space_map_close(space_map_t *sm)
535 {
536 	if (sm == NULL)
537 		return;
538 
539 	if (sm->sm_dbuf != NULL)
540 		dmu_buf_rele(sm->sm_dbuf, sm);
541 	sm->sm_dbuf = NULL;
542 	sm->sm_phys = NULL;
543 
544 	kmem_free(sm, sizeof (*sm));
545 }
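
/*
 * Editorial sketch, not part of the original source: the usual
 * open/close pairing.  The object number, range, and shift below are
 * hypothetical placeholders.
 */
#if 0
static void
space_map_open_close_example(objset_t *os, uint64_t smobj)
{
	space_map_t *sm = NULL;	/* must be NULL, per space_map_open() */

	/* Cover a 1GB region starting at offset 0, in 512-byte units. */
	if (space_map_open(&sm, os, smobj, 0, 1ULL << 30, 9) == 0) {
		/* ... space_map_load(), space_map_iterate(), etc. ... */
		space_map_close(sm);
	}
}
#endif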
546 
547 void
548 space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
549 {
550 	objset_t *os = sm->sm_os;
551 	spa_t *spa = dmu_objset_spa(os);
552 	dmu_object_info_t doi;
553 
554 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
555 	ASSERT(dmu_tx_is_syncing(tx));
556 	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));
557 
558 	dmu_object_info_from_db(sm->sm_dbuf, &doi);
559 
560 	/*
561 	 * If the space map has the wrong bonus size (because
562 	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
563 	 * the wrong block size (because space_map_blksz has changed),
564 	 * free and re-allocate its object with the updated sizes.
565 	 *
566 	 * Otherwise, just truncate the current object.
567 	 */
568 	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
569 	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
570 	    doi.doi_data_block_size != blocksize) {
571 		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
572 		    "object[%llu]: old bonus %u, old blocksz %u",
573 		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
574 		    doi.doi_bonus_size, doi.doi_data_block_size);
575 
576 		space_map_free(sm, tx);
577 		dmu_buf_rele(sm->sm_dbuf, sm);
578 
579 		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
580 		VERIFY0(space_map_open_impl(sm));
581 	} else {
582 		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
583 
584 		/*
585 		 * If the spacemap is reallocated, its histogram
586 		 * will be reset.  Do the same in the common case so that
587 		 * bugs related to the uncommon case do not go unnoticed.
588 		 */
589 		bzero(sm->sm_phys->smp_histogram,
590 		    sizeof (sm->sm_phys->smp_histogram));
591 	}
592 
593 	dmu_buf_will_dirty(sm->sm_dbuf, tx);
594 	sm->sm_phys->smp_objsize = 0;
595 	sm->sm_phys->smp_alloc = 0;
596 }
597 
598 /*
599  * Update the in-core space_map allocation and length values.
600  */
601 void
602 space_map_update(space_map_t *sm)
603 {
604 	if (sm == NULL)
605 		return;
606 
607 	sm->sm_alloc = sm->sm_phys->smp_alloc;
608 	sm->sm_length = sm->sm_phys->smp_objsize;
609 }
610 
611 uint64_t
612 space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
613 {
614 	spa_t *spa = dmu_objset_spa(os);
615 	uint64_t object;
616 	int bonuslen;
617 
618 	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
619 		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
620 		bonuslen = sizeof (space_map_phys_t);
621 		ASSERT3U(bonuslen, <=, dmu_bonus_max());
622 	} else {
623 		bonuslen = SPACE_MAP_SIZE_V0;
624 	}
625 
626 	object = dmu_object_alloc(os, DMU_OT_SPACE_MAP, blocksize,
627 	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
628 
629 	return (object);
630 }
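
/*
 * Editorial sketch, not part of the original source: creating a new
 * space map object and opening an in-core handle on it.  The function
 * name and parameters are hypothetical; the allocation must happen
 * inside an assigned transaction.
 */
#if 0
static uint64_t
space_map_create_example(objset_t *os, int blocksize, uint64_t start,
    uint64_t size, uint8_t shift, dmu_tx_t *tx, space_map_t **smp)
{
	uint64_t smobj = space_map_alloc(os, blocksize, tx);

	/* *smp must be NULL on entry, per space_map_open(). */
	VERIFY0(space_map_open(smp, os, smobj, start, size, shift));
	return (smobj);
}
#endif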
631 
632 void
633 space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
634 {
635 	spa_t *spa = dmu_objset_spa(os);
636 	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
637 		dmu_object_info_t doi;
638 
639 		VERIFY0(dmu_object_info(os, smobj, &doi));
640 		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
641 			spa_feature_decr(spa,
642 			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
643 		}
644 	}
645 
646 	VERIFY0(dmu_object_free(os, smobj, tx));
647 }
648 
649 void
650 space_map_free(space_map_t *sm, dmu_tx_t *tx)
651 {
652 	if (sm == NULL)
653 		return;
654 
655 	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
656 	sm->sm_object = 0;
657 }
658 
659 uint64_t
660 space_map_object(space_map_t *sm)
661 {
662 	return (sm != NULL ? sm->sm_object : 0);
663 }
664 
665 /*
666  * Returns the already synced, on-disk allocated space.
667  */
668 uint64_t
669 space_map_allocated(space_map_t *sm)
670 {
671 	return (sm != NULL ? sm->sm_alloc : 0);
672 }
673 
674 /*
675  * Returns the already synced, on-disk length.
676  */
677 uint64_t
678 space_map_length(space_map_t *sm)
679 {
680 	return (sm != NULL ? sm->sm_length : 0);
681 }
682 
683 /*
684  * Returns the allocated space that is currently syncing.
685  */
686 int64_t
687 space_map_alloc_delta(space_map_t *sm)
688 {
689 	if (sm == NULL)
690 		return (0);
691 	ASSERT(sm->sm_dbuf != NULL);
692 	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
693 }
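
/*
 * Editorial illustration, not part of the original source.  Put
 * differently, the accessors above satisfy:
 *
 *	space_map_allocated(sm) + space_map_alloc_delta(sm) == smp_alloc
 *
 * i.e. the already-synced allocation plus the delta currently being
 * synced equals the allocation value that will be on disk once the
 * current txg completes.
 */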
694