1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Sparse bit array
4  *
5  * Copyright (C) 2018, Google LLC.
6  * Copyright (C) 2018, Red Hat, Inc. (code style cleanup and fuzzing driver)
7  *
8  * This library provides functions to support a memory efficient bit array,
9  * with an index size of 2^64.  A sparsebit array is allocated through
10  * the use of sparsebit_alloc() and freed via sparsebit_free(),
11  * such as in the following:
12  *
13  *   struct sparsebit *s;
14  *   s = sparsebit_alloc();
15  *   sparsebit_free(&s);
16  *
17  * The struct sparsebit type is opaque to callers of this library.
18  * Note that sparsebit_free() takes a pointer to the caller's struct
19  * sparsebit pointer.  This is so that sparsebit_free() is able to
20  * poison that pointer (e.g. set it to NULL) before
21  * returning to the caller.
22  *
23  * Between the return of sparsebit_alloc() and the call of
24  * sparsebit_free(), there are multiple query and modifying operations
25  * that can be performed on the allocated sparsebit array.  All of
26  * these operations take as a parameter the value returned from
27  * sparsebit_alloc() and most also take a bit index.  Frequently
28  * used routines include:
29  *
30  *  ---- Query Operations
31  *  sparsebit_is_set(s, idx)
32  *  sparsebit_is_clear(s, idx)
33  *  sparsebit_any_set(s)
34  *  sparsebit_first_set(s)
35  *  sparsebit_next_set(s, prev_idx)
36  *
37  *  ---- Modifying Operations
38  *  sparsebit_set(s, idx)
39  *  sparsebit_clear(s, idx)
40  *  sparsebit_set_num(s, idx, num);
41  *  sparsebit_clear_num(s, idx, num);
42  *
43  * A common operation is to iterate over all the bits set in a
44  * sparsebit array.  This can be done via code with the following structure:
45  *
46  *   sparsebit_idx_t idx;
47  *   if (sparsebit_any_set(s)) {
48  *     idx = sparsebit_first_set(s);
49  *     do {
50  *       ...
51  *       idx = sparsebit_next_set(s, idx);
52  *     } while (idx != 0);
53  *   }
54  *
55  * The index of the first bit set needs to be obtained via
56  * sparsebit_first_set(), because sparsebit_next_set() needs
57  * the index of the previously set bit.  The sparsebit_idx_t type is
58  * unsigned, so there is no previous index before 0 that is available.
59  * Also, the call to sparsebit_first_set() is not made unless there
60  * is at least 1 bit in the array set.  This is because sparsebit_first_set()
61  * aborts if called with no bits set.
62  * It is the caller's responsibility to ensure that the
63  * sparsebit array has at least a single bit set before calling
64  * sparsebit_first_set().
65  *
66  * ==== Implementation Overview ====
67  * For the most part the internal implementation of sparsebit is
68  * opaque to the caller.  One important implementation detail that the
69  * caller may need to be aware of is the spatial complexity of the
70  * implementation.  This implementation of a sparsebit array is not
71  * only sparse, in that it uses memory proportional to the number of bits
72  * set, but is also efficient in memory usage when most of the bits are
73  * set.
74  *
75  * At a high level, the state of the bit settings is maintained through
76  * the use of a binary-search tree, where each node contains at least
77  * the following members:
78  *
79  *   typedef uint64_t sparsebit_idx_t;
80  *   typedef uint64_t sparsebit_num_t;
81  *
82  *   sparsebit_idx_t idx;
83  *   uint32_t mask;
84  *   sparsebit_num_t num_after;
85  *
86  * The idx member contains the bit index of the first bit described by this
87  * node, while the mask member stores the setting of the first 32 bits.
88  * The setting of the bit at idx + n, where 0 <= n < 32, is located in the
89  * mask member at 1 << n.
90  *
91  * Nodes are sorted by idx and the bits described by two nodes will never
92  * overlap. The idx member is always aligned to the mask size, i.e. a
93  * multiple of 32.
94  *
95  * Beyond a typical implementation, the nodes in this implementation also
96  * contain a member named num_after.  The num_after member holds the
97  * number of bits immediately after the mask bits that are contiguously set.
98  * The use of the num_after member allows this implementation to efficiently
99  * represent cases where most bits are set.  For example, the case of all
100  * but the last two bits set is represented by the following two nodes:
101  *
102  *   node 0 - idx: 0x0 mask: 0xffffffff num_after: 0xffffffffffffffc0
103  *   node 1 - idx: 0xffffffffffffffe0 mask: 0x3fffffff num_after: 0
104  *
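 * As a further illustrative example (not taken from the code), setting only
 * bit 0x1005 in an otherwise empty array is represented by a single node:
 *
 *   node 0 - idx: 0x1000 mask: 0x20 num_after: 0
 *
 * since idx is rounded down to a multiple of 32 and bit 0x1005 is bit 5 of
 * that node's mask.
 *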
105  * ==== Invariants ====
106  * This implementation uses the following invariants:
107  *
108  *   + Nodes are only used to represent bits that are set.
109  *     Nodes with a mask of 0 and num_after of 0 are not allowed.
110  *
111  *   + Sum of bits set in all the nodes is equal to the value of
112  *     the struct sparsebit num_set member.
113  *
114  *   + The setting of at least one bit is always described in a node's
115  *     mask (mask >= 1).
116  *
117  *   + A node with all mask bits set only occurs when the last bit
118  *     described by the previous node is not equal to this node's
119  *     starting index - 1.  All such occurrences of this condition are
120  *     avoided by moving the setting of the node's mask bits into
121  *     the previous node's num_after setting.
122  *
123  *   + A node's starting index is evenly divisible by the number of bits
124  *     within a node's mask member.
125  *
126  *   + Nodes never represent a range of bits that wrap around the
127  *     highest supported index.
128  *
129  *      (idx + MASK_BITS + num_after - 1) <= ((sparsebit_idx_t) 0) - 1
130  *
131  *     As a consequence of the above, the num_after member of a node
132  *     will always be <=:
133  *
134  *       maximum_index - nodes_starting_index - number_of_mask_bits
135  *
136  *   + Nodes within the binary search tree are sorted based on each
137  *     node's starting index.
138  *
139  *   + The ranges of bits described by any two nodes do not overlap.  The
140  *     range of bits described by a single node is:
141  *
142  *       start: node->idx
143  *       end (inclusive): node->idx + MASK_BITS + node->num_after - 1;
144  *
145  * Note, at times these invariants are temporarily violated for a
146  * specific portion of the code.  For example, when setting a mask
147  * bit, there is a small delay between when the mask bit is set and the
148  * value in the struct sparsebit num_set member is updated.  Other
149  * temporary violations occur when node_split() is called with a specified
150  * index to ensure that a node exists whose mask represents the bit
151  * at the specified index.  At times, to do this, node_split()
152  * must split an existing node into two nodes or create a node that
153  * has no bits set.  Such temporary violations must be corrected before
154  * returning to the caller.  These corrections are typically performed
155  * by the local function node_reduce().
156  */
157 
158 #include "test_util.h"
159 #include "sparsebit.h"
160 #include <limits.h>
161 #include <assert.h>
162 
163 #define DUMP_LINE_MAX 100 /* Does not include indent amount */
164 
165 typedef uint32_t mask_t;
166 #define MASK_BITS (sizeof(mask_t) * CHAR_BIT)
167 
168 struct node {
169 	struct node *parent;
170 	struct node *left;
171 	struct node *right;
172 	sparsebit_idx_t idx; /* index of least-significant bit in mask */
173 	sparsebit_num_t num_after; /* num contiguously set after mask */
174 	mask_t mask; /* setting of bits idx through idx + MASK_BITS - 1 */
175 };
176 
177 struct sparsebit {
178 	/*
179 	 * Points to root node of the binary search
180 	 * tree.  Equal to NULL when no bits are set in
181 	 * the entire sparsebit array.
182 	 */
183 	struct node *root;
184 
185 	/*
186 	 * A redundant count of the total number of bits set.  Used for
187 	 * diagnostic purposes and to change the time complexity of
188 	 * sparsebit_num_set() from O(n) to O(1).
189 	 * Note: Due to overflow, a value of 0 means none or all set.
190 	 */
191 	sparsebit_num_t num_set;
192 };
193 
194 /* Returns the number of set bits described by the settings
195  * of the node pointed to by nodep.
196  */
197 static sparsebit_num_t node_num_set(struct node *nodep)
198 {
199 	return nodep->num_after + __builtin_popcount(nodep->mask);
200 }
201 
202 /* Returns a pointer to the node that describes the
203  * lowest bit index.
204  */
205 static struct node *node_first(struct sparsebit *s)
206 {
207 	struct node *nodep;
208 
209 	for (nodep = s->root; nodep && nodep->left; nodep = nodep->left)
210 		;
211 
212 	return nodep;
213 }
214 
215 /* Returns a pointer to the node that describes the
216  * lowest bit index > the index of the node pointed to by np.
217  * Returns NULL if no node with a higher index exists.
218  */
219 static struct node *node_next(struct sparsebit *s, struct node *np)
220 {
221 	struct node *nodep = np;
222 
223 	/*
224 	 * If current node has a right child, next node is the left-most
225 	 * of the right child.
226 	 */
227 	if (nodep->right) {
228 		for (nodep = nodep->right; nodep->left; nodep = nodep->left)
229 			;
230 		return nodep;
231 	}
232 
233 	/*
234 	 * No right child.  Go up until node is left child of a parent.
235 	 * That parent is then the next node.
236 	 */
237 	while (nodep->parent && nodep == nodep->parent->right)
238 		nodep = nodep->parent;
239 
240 	return nodep->parent;
241 }
242 
243 /* Searches for and returns a pointer to the node that describes the
244  * highest index < the index of the node pointed to by np.
245  * Returns NULL if no node with a lower index exists.
246  */
247 static struct node *node_prev(struct sparsebit *s, struct node *np)
248 {
249 	struct node *nodep = np;
250 
251 	/*
252 	 * If current node has a left child, previous node is the right-most
253 	 * of the left child.
254 	 */
255 	if (nodep->left) {
256 		for (nodep = nodep->left; nodep->right; nodep = nodep->right)
257 			;
258 		return nodep;
259 	}
260 
261 	/*
262 	 * No left child.  Go up until node is right child of a parent.
263 	 * That parent is then the previous node.
264 	 */
265 	while (nodep->parent && nodep == nodep->parent->left)
266 		nodep = nodep->parent;
267 
268 	return nodep->parent;
269 }
270 
271 
272 /* Allocates space to hold a copy of the node sub-tree pointed to by
273  * subtree and duplicates the bit settings to the newly allocated nodes.
274  * Returns the newly allocated copy of subtree.
275  */
276 static struct node *node_copy_subtree(struct node *subtree)
277 {
278 	struct node *root;
279 
280 	/* Duplicate the node at the root of the subtree */
281 	root = calloc(1, sizeof(*root));
282 	if (!root) {
283 		perror("calloc");
284 		abort();
285 	}
286 
287 	root->idx = subtree->idx;
288 	root->mask = subtree->mask;
289 	root->num_after = subtree->num_after;
290 
291 	/* As needed, recursively duplicate the left and right subtrees */
292 	if (subtree->left) {
293 		root->left = node_copy_subtree(subtree->left);
294 		root->left->parent = root;
295 	}
296 
297 	if (subtree->right) {
298 		root->right = node_copy_subtree(subtree->right);
299 		root->right->parent = root;
300 	}
301 
302 	return root;
303 }
304 
305 /* Searches for and returns a pointer to the node that describes the setting
306  * of the bit given by idx.  A node describes the setting of a bit if its
307  * index is within the bits described by the mask bits or the number of
308  * contiguous bits set after the mask.  Returns NULL if there is no such node.
309  */
310 static struct node *node_find(struct sparsebit *s, sparsebit_idx_t idx)
311 {
312 	struct node *nodep;
313 
314 	/* Find the node that describes the setting of the bit at idx */
315 	for (nodep = s->root; nodep;
316 	     nodep = nodep->idx > idx ? nodep->left : nodep->right) {
317 		if (idx >= nodep->idx &&
318 		    idx <= nodep->idx + MASK_BITS + nodep->num_after - 1)
319 			break;
320 	}
321 
322 	return nodep;
323 }
324 
325 /* Entry Requirements:
326  *   + A node that describes the setting of idx is not already present.
327  *
328  * Adds a new node to describe the setting of the bit at the index given
329  * by idx.  Returns a pointer to the newly added node.
330  *
331  * TODO(lhuemill): Degenerate cases cause the tree to get unbalanced.
332  */
333 static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx)
334 {
335 	struct node *nodep, *parentp, *prev;
336 
337 	/* Allocate and initialize the new node. */
338 	nodep = calloc(1, sizeof(*nodep));
339 	if (!nodep) {
340 		perror("calloc");
341 		abort();
342 	}
343 
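	/* Round idx down to a mask-aligned index (a multiple of MASK_BITS). */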
344 	nodep->idx = idx & -MASK_BITS;
345 
346 	/* If no nodes, set it up as the root node. */
347 	if (!s->root) {
348 		s->root = nodep;
349 		return nodep;
350 	}
351 
352 	/*
353 	 * Find the parent where the new node should be attached
354 	 * and add the node there.
355 	 */
356 	parentp = s->root;
357 	while (true) {
358 		if (idx < parentp->idx) {
359 			if (!parentp->left) {
360 				parentp->left = nodep;
361 				nodep->parent = parentp;
362 				break;
363 			}
364 			parentp = parentp->left;
365 		} else {
366 			assert(idx > parentp->idx + MASK_BITS + parentp->num_after - 1);
367 			if (!parentp->right) {
368 				parentp->right = nodep;
369 				nodep->parent = parentp;
370 				break;
371 			}
372 			parentp = parentp->right;
373 		}
374 	}
375 
376 	/*
377 	 * Do the num_after bits of the previous node overlap with the mask
378 	 * of the new node?  If so, set the bits in the new node's mask
379 	 * and reduce the previous node's num_after.
380 	 */
381 	prev = node_prev(s, nodep);
382 	while (prev && prev->idx + MASK_BITS + prev->num_after - 1 >= nodep->idx) {
383 		unsigned int n1 = (prev->idx + MASK_BITS + prev->num_after - 1)
384 			- nodep->idx;
385 		assert(prev->num_after > 0);
386 		assert(n1 < MASK_BITS);
387 		assert(!(nodep->mask & (1 << n1)));
388 		nodep->mask |= (1 << n1);
389 		prev->num_after--;
390 	}
391 
392 	return nodep;
393 }
394 
395 /* Returns whether all the bits in the sparsebit array are set.  */
396 bool sparsebit_all_set(struct sparsebit *s)
397 {
398 	/*
399 	 * If any nodes are present, there must be at least one bit set.  The
400 	 * only case where a bit is set and the total num_set is 0 is when
401 	 * all bits are set.
402 	 */
403 	return s->root && s->num_set == 0;
404 }
405 
406 /* Clears all bits described by the node pointed to by nodep, then
407  * removes the node.
408  */
409 static void node_rm(struct sparsebit *s, struct node *nodep)
410 {
411 	struct node *tmp;
412 	sparsebit_num_t num_set;
413 
414 	num_set = node_num_set(nodep);
415 	assert(s->num_set >= num_set || sparsebit_all_set(s));
416 	s->num_set -= num_set;
417 
418 	/* Have both left and right child */
419 	if (nodep->left && nodep->right) {
420 		/*
421 		 * Move the left subtree to the leftmost node
422 		 * of the right subtree.
423 		 */
424 		for (tmp = nodep->right; tmp->left; tmp = tmp->left)
425 			;
426 		tmp->left = nodep->left;
427 		nodep->left = NULL;
428 		tmp->left->parent = tmp;
429 	}
430 
431 	/* Left only child */
432 	if (nodep->left) {
433 		if (!nodep->parent) {
434 			s->root = nodep->left;
435 			nodep->left->parent = NULL;
436 		} else {
437 			nodep->left->parent = nodep->parent;
438 			if (nodep == nodep->parent->left)
439 				nodep->parent->left = nodep->left;
440 			else {
441 				assert(nodep == nodep->parent->right);
442 				nodep->parent->right = nodep->left;
443 			}
444 		}
445 
446 		nodep->parent = nodep->left = nodep->right = NULL;
447 		free(nodep);
448 
449 		return;
450 	}
451 
452 
453 	/* Right only child */
454 	if (nodep->right) {
455 		if (!nodep->parent) {
456 			s->root = nodep->right;
457 			nodep->right->parent = NULL;
458 		} else {
459 			nodep->right->parent = nodep->parent;
460 			if (nodep == nodep->parent->left)
461 				nodep->parent->left = nodep->right;
462 			else {
463 				assert(nodep == nodep->parent->right);
464 				nodep->parent->right = nodep->right;
465 			}
466 		}
467 
468 		nodep->parent = nodep->left = nodep->right = NULL;
469 		free(nodep);
470 
471 		return;
472 	}
473 
474 	/* Leaf Node */
475 	if (!nodep->parent) {
476 		s->root = NULL;
477 	} else {
478 		if (nodep->parent->left == nodep)
479 			nodep->parent->left = NULL;
480 		else {
481 			assert(nodep == nodep->parent->right);
482 			nodep->parent->right = NULL;
483 		}
484 	}
485 
486 	nodep->parent = nodep->left = nodep->right = NULL;
487 	free(nodep);
488 
489 	return;
490 }
491 
492 /* Splits the node containing the bit at idx so that there is a node
493  * that starts at the specified index.  If no such node exists, a new
494  * node at the specified index is created.  Returns the node that starts at idx.
495  *
496  * idx must start on a mask boundary.
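 *
 * As an illustrative example (values chosen for exposition, not taken from
 * the code): splitting a node with idx: 0x0 mask: 0xffffffff num_after: 96
 * at idx 0x40 leaves idx: 0x0 mask: 0xffffffff num_after: 32 and creates a
 * new node idx: 0x40 mask: 0xffffffff num_after: 32.  The new node's
 * all-ones mask adjacent to its predecessor temporarily violates an
 * invariant, which callers later correct, typically via node_reduce().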
497  */
498 static struct node *node_split(struct sparsebit *s, sparsebit_idx_t idx)
499 {
500 	struct node *nodep1, *nodep2;
501 	sparsebit_idx_t offset;
502 	sparsebit_num_t orig_num_after;
503 
504 	assert(!(idx % MASK_BITS));
505 
506 	/*
507 	 * Is there a node that describes the setting of idx?
508 	 * If not, add it.
509 	 */
510 	nodep1 = node_find(s, idx);
511 	if (!nodep1)
512 		return node_add(s, idx);
513 
514 	/*
515 	 * All done if the starting index of the node is where the
516 	 * split should occur.
517 	 */
518 	if (nodep1->idx == idx)
519 		return nodep1;
520 
521 	/*
522 	 * Split point not at start of mask, so it must be part of
523 	 * bits described by num_after.
524 	 */
525 
526 	/*
527 	 * Calculate offset within num_after for where the split is
528 	 * to occur.
529 	 */
530 	offset = idx - (nodep1->idx + MASK_BITS);
531 	orig_num_after = nodep1->num_after;
532 
533 	/*
534 	 * Add a new node to describe the bits starting at
535 	 * the split point.
536 	 */
537 	nodep1->num_after = offset;
538 	nodep2 = node_add(s, idx);
539 
540 	/* Move bits after the split point into the new node */
541 	nodep2->num_after = orig_num_after - offset;
542 	if (nodep2->num_after >= MASK_BITS) {
543 		nodep2->mask = ~(mask_t) 0;
544 		nodep2->num_after -= MASK_BITS;
545 	} else {
546 		nodep2->mask = (1 << nodep2->num_after) - 1;
547 		nodep2->num_after = 0;
548 	}
549 
550 	return nodep2;
551 }
552 
553 /* Iteratively reduces the node pointed to by nodep and its adjacent
554  * nodes into a more compact form.  For example, a node with a mask with
555  * all bits set adjacent to a previous node, will get combined into a
556  * single node with an increased num_after setting.
557  *
558  * After each reduction, a further check is made to see if additional
559  * reductions are possible with the new previous and next nodes.  Note,
560  * a search for a reduction is only done across the nodes nearest nodep
561  * and those that became part of a reduction.  Reductions beyond nodep
562  * and the adjacent nodes that are reduced are not discovered.  It is the
563  * responsibility of the caller to pass a nodep that is within one node
564  * of each possible reduction.
565  *
566  * This function does not fix the temporary violation of all invariants.
567  * For example it does not fix the case where the bit settings described
568  * by two or more nodes overlap.  Such a violation introduces the potential
569  * complication of a bit setting for a specific index having different settings
570  * in different nodes.  This would then introduce the further complication
571  * of which node has the correct setting of the bit and thus such conditions
572  * are not allowed.
573  *
574  * This function is designed to fix invariant violations that are introduced
575  * by node_split() and by changes to a node's mask or num_after members.
576  * For example, when setting a bit within a node's mask, the function that
577  * sets the bit doesn't have to worry about whether the setting of that
578  * bit caused the mask to have leading only or trailing only bits set.
579  * Instead, the function can call node_reduce(), with nodep equal to the
580  * node address that it set a mask bit in, and node_reduce() will notice
581  * the cases of leading or trailing only bits and that there is an
582  * adjacent node that the bit settings could be merged into.
583  *
584  * This implementation specifically detects and corrects violation of the
585  * following invariants:
586  *
587  *   + Nodes are only used to represent bits that are set.
588  *     Nodes with a mask of 0 and num_after of 0 are not allowed.
589  *
590  *   + The setting of at least one bit is always described in a node's
591  *     mask (mask >= 1).
592  *
593  *   + A node with all mask bits set only occurs when the last bit
594  *     described by the previous node is not equal to this node's
595  *     starting index - 1.  All such occurrences of this condition are
596  *     avoided by moving the setting of the node's mask bits into
597  *     the previous node's num_after setting.
598  */
599 static void node_reduce(struct sparsebit *s, struct node *nodep)
600 {
601 	bool reduction_performed;
602 
603 	do {
604 		reduction_performed = false;
605 		struct node *prev, *next, *tmp;
606 
607 		/* 1) Potential reductions within the current node. */
608 
609 		/* Nodes with all bits cleared may be removed. */
610 		if (nodep->mask == 0 && nodep->num_after == 0) {
611 			/*
612 			 * About to remove the node pointed to by
613 			 * nodep, which normally would cause a problem
614 			 * for the next pass through the reduction loop,
615 			 * because the node at the starting point no longer
616 			 * exists.  This potential problem is handled
617 			 * by first remembering the location of the next
618 			 * or previous nodes.  Doesn't matter which, because
619 			 * once the node at nodep is removed, there will be
620 			 * no other nodes between prev and next.
621 			 *
622 			 * Note, the checks performed on nodep against
623 			 * both prev and next look for an adjacent
624 			 * node that can be reduced into a single node.  As
625 			 * such, after removing the node at nodep, it doesn't
626 			 * matter whether the nodep for the next pass
627 			 * through the loop is equal to the previous pass's
628 			 * prev or next node.  Either way, on the next pass
629 			 * the one not selected will become either the
630 			 * prev or next node.
631 			 */
632 			tmp = node_next(s, nodep);
633 			if (!tmp)
634 				tmp = node_prev(s, nodep);
635 
636 			node_rm(s, nodep);
637 			nodep = NULL;
638 
639 			nodep = tmp;
640 			reduction_performed = true;
641 			continue;
642 		}
643 
644 		/*
645 		 * When the mask is 0, we can reduce the amount of num_after
646 		 * bits by moving the initial num_after bits into the mask.
647 		 */
648 		if (nodep->mask == 0) {
649 			assert(nodep->num_after != 0);
650 			assert(nodep->idx + MASK_BITS > nodep->idx);
651 
652 			nodep->idx += MASK_BITS;
653 
654 			if (nodep->num_after >= MASK_BITS) {
655 				nodep->mask = ~0;
656 				nodep->num_after -= MASK_BITS;
657 			} else {
658 				nodep->mask = (1u << nodep->num_after) - 1;
659 				nodep->num_after = 0;
660 			}
661 
662 			reduction_performed = true;
663 			continue;
664 		}
665 
666 		/*
667 		 * 2) Potential reductions between the current and
668 		 * previous nodes.
669 		 */
670 		prev = node_prev(s, nodep);
671 		if (prev) {
672 			sparsebit_idx_t prev_highest_bit;
673 
674 			/* Nodes with no bits set can be removed. */
675 			if (prev->mask == 0 && prev->num_after == 0) {
676 				node_rm(s, prev);
677 
678 				reduction_performed = true;
679 				continue;
680 			}
681 
682 			/*
683 			 * All mask bits set and previous node has
684 			 * adjacent index.
685 			 */
686 			if (nodep->mask + 1 == 0 &&
687 			    prev->idx + MASK_BITS == nodep->idx) {
688 				prev->num_after += MASK_BITS + nodep->num_after;
689 				nodep->mask = 0;
690 				nodep->num_after = 0;
691 
692 				reduction_performed = true;
693 				continue;
694 			}
695 
696 			/*
697 			 * Is node adjacent to previous node and the node
698 			 * contains a single contiguous range of bits
699 			 * starting from the beginning of the mask?
700 			 */
701 			prev_highest_bit = prev->idx + MASK_BITS - 1 + prev->num_after;
702 			if (prev_highest_bit + 1 == nodep->idx &&
703 			    (nodep->mask | (nodep->mask >> 1)) == nodep->mask) {
704 				/*
705 				 * How many contiguous bits are there?
706 				 * It is equal to the total number of set
707 				 * bits, due to an earlier check that
708 				 * there is a single contiguous range of
709 				 * set bits.
710 				 */
711 				unsigned int num_contiguous
712 					= __builtin_popcount(nodep->mask);
713 				assert((num_contiguous > 0) &&
714 				       ((1ULL << num_contiguous) - 1) == nodep->mask);
715 
716 				prev->num_after += num_contiguous;
717 				nodep->mask = 0;
718 
719 				/*
720 				 * For predictable performance, handle special
721 				 * case where all mask bits are set and there
722 				 * is a non-zero num_after setting.  This code
723 				 * is functionally correct without the following
724 				 * conditionalized statements, but without them
725 				 * the value of num_after is only reduced by
726 				 * the number of mask bits per pass.  There are
727 				 * cases where num_after can be close to 2^64.
728 				 * Without this code it could take nearly
729 				 * (2^64) / 32 passes to perform the full
730 				 * reduction.
731 				 */
732 				if (num_contiguous == MASK_BITS) {
733 					prev->num_after += nodep->num_after;
734 					nodep->num_after = 0;
735 				}
736 
737 				reduction_performed = true;
738 				continue;
739 			}
740 		}
741 
742 		/*
743 		 * 3) Potential reductions between the current and
744 		 * next nodes.
745 		 */
746 		next = node_next(s, nodep);
747 		if (next) {
748 			/* Nodes with no bits set can be removed. */
749 			if (next->mask == 0 && next->num_after == 0) {
750 				node_rm(s, next);
751 				reduction_performed = true;
752 				continue;
753 			}
754 
755 			/*
756 			 * Is next node index adjacent to current node
757 			 * and has a mask with all bits set?
758 			 */
759 			if (next->idx == nodep->idx + MASK_BITS + nodep->num_after &&
760 			    next->mask == ~(mask_t) 0) {
761 				nodep->num_after += MASK_BITS;
762 				next->mask = 0;
763 				nodep->num_after += next->num_after;
764 				next->num_after = 0;
765 
766 				node_rm(s, next);
767 				next = NULL;
768 
769 				reduction_performed = true;
770 				continue;
771 			}
772 		}
773 	} while (nodep && reduction_performed);
774 }
775 
776 /* Returns whether the bit at the index given by idx within the
777  * sparsebit array is set.
778  */
779 bool sparsebit_is_set(struct sparsebit *s, sparsebit_idx_t idx)
780 {
781 	struct node *nodep;
782 
783 	/* Find the node that describes the setting of the bit at idx */
784 	for (nodep = s->root; nodep;
785 	     nodep = nodep->idx > idx ? nodep->left : nodep->right)
786 		if (idx >= nodep->idx &&
787 		    idx <= nodep->idx + MASK_BITS + nodep->num_after - 1)
788 			goto have_node;
789 
790 	return false;
791 
792 have_node:
793 	/* Bit is set if it is any of the bits described by num_after */
794 	if (nodep->num_after && idx >= nodep->idx + MASK_BITS)
795 		return true;
796 
797 	/* Is the corresponding mask bit set */
798 	assert(idx >= nodep->idx && idx - nodep->idx < MASK_BITS);
799 	return !!(nodep->mask & (1 << (idx - nodep->idx)));
800 }
801 
802 /* Within the sparsebit array pointed to by s, sets the bit
803  * at the index given by idx.
804  */
805 static void bit_set(struct sparsebit *s, sparsebit_idx_t idx)
806 {
807 	struct node *nodep;
808 
809 	/* Skip bits that are already set */
810 	if (sparsebit_is_set(s, idx))
811 		return;
812 
813 	/*
814 	 * Get a node where the bit at idx is described by the mask.
815 	 * node_split() will also create a node, if there isn't
816 	 * already a node that describes the setting of the bit.
817 	 */
818 	nodep = node_split(s, idx & -MASK_BITS);
819 
820 	/* Set the bit within the nodes mask */
821 	assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);
822 	assert(!(nodep->mask & (1 << (idx - nodep->idx))));
823 	nodep->mask |= 1 << (idx - nodep->idx);
824 	s->num_set++;
825 
826 	node_reduce(s, nodep);
827 }
828 
829 /* Within the sparsebit array pointed to by s, clears the bit
830  * at the index given by idx.
831  */
832 static void bit_clear(struct sparsebit *s, sparsebit_idx_t idx)
833 {
834 	struct node *nodep;
835 
836 	/* Skip bits that are already cleared */
837 	if (!sparsebit_is_set(s, idx))
838 		return;
839 
840 	/* Is there a node that describes the setting of this bit? */
841 	nodep = node_find(s, idx);
842 	if (!nodep)
843 		return;
844 
845 	/*
846 	 * If a num_after bit, split the node, so that the bit is
847 	 * part of a node mask.
848 	 */
849 	if (idx >= nodep->idx + MASK_BITS)
850 		nodep = node_split(s, idx & -MASK_BITS);
851 
852 	/*
853 	 * After node_split above, bit at idx should be within the mask.
854 	 * Clear that bit.
855 	 */
856 	assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);
857 	assert(nodep->mask & (1 << (idx - nodep->idx)));
858 	nodep->mask &= ~(1 << (idx - nodep->idx));
859 	assert(s->num_set > 0 || sparsebit_all_set(s));
860 	s->num_set--;
861 
862 	node_reduce(s, nodep);
863 }
864 
865 /* Recursively dumps to the FILE stream given by stream the contents
866  * of the sub-tree of nodes pointed to by nodep.  Each line of output
867  * is prefixed by the number of spaces given by indent.  On each
868  * recursion, the indent amount is increased by 2.  This causes nodes
869  * at each level deeper into the binary search tree to be displayed
870  * with a greater indent.
871  */
872 static void dump_nodes(FILE *stream, struct node *nodep,
873 	unsigned int indent)
874 {
875 	char *node_type;
876 
877 	/* Dump contents of node */
878 	if (!nodep->parent)
879 		node_type = "root";
880 	else if (nodep == nodep->parent->left)
881 		node_type = "left";
882 	else {
883 		assert(nodep == nodep->parent->right);
884 		node_type = "right";
885 	}
886 	fprintf(stream, "%*s---- %s nodep: %p\n", indent, "", node_type, nodep);
887 	fprintf(stream, "%*s  parent: %p left: %p right: %p\n", indent, "",
888 		nodep->parent, nodep->left, nodep->right);
889 	fprintf(stream, "%*s  idx: 0x%lx mask: 0x%x num_after: 0x%lx\n",
890 		indent, "", nodep->idx, nodep->mask, nodep->num_after);
891 
892 	/* If present, dump contents of left child nodes */
893 	if (nodep->left)
894 		dump_nodes(stream, nodep->left, indent + 2);
895 
896 	/* If present, dump contents of right child nodes */
897 	if (nodep->right)
898 		dump_nodes(stream, nodep->right, indent + 2);
899 }
900 
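/* Returns the sparsebit index of the lowest set mask bit at or above bit
 * position start within the node pointed to by nodep.  The expression
 * (mask & -leading) keeps only the mask bits at positions >= start;
 * __builtin_ctz() then yields the lowest such position.  The caller must
 * ensure that such a bit exists, as __builtin_ctz(0) is undefined.
 */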
901 static inline sparsebit_idx_t node_first_set(struct node *nodep, int start)
902 {
903 	mask_t leading = (mask_t)1 << start;
904 	int n1 = __builtin_ctz(nodep->mask & -leading);
905 
906 	return nodep->idx + n1;
907 }
908 
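/* Like node_first_set(), but returns the sparsebit index of the lowest
 * mask bit that is cleared at or above bit position start within the node
 * pointed to by nodep.
 */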
909 static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start)
910 {
911 	mask_t leading = (mask_t)1 << start;
912 	int n1 = __builtin_ctz(~nodep->mask & -leading);
913 
914 	return nodep->idx + n1;
915 }
916 
917 /* Dumps to the FILE stream specified by stream, the implementation dependent
918  * internal state of s.  Each line of output is prefixed with the number
919  * of spaces given by indent.  The output is completely implementation
920  * dependent and subject to change.  Output from this function should only
921  * be used for diagnostic purposes.  For example, this function can be
922  * used by test cases after they detect an unexpected condition, as a means
923  * to capture diagnostic information.
924  */
925 static void sparsebit_dump_internal(FILE *stream, struct sparsebit *s,
926 	unsigned int indent)
927 {
928 	/* Dump the contents of s */
929 	fprintf(stream, "%*sroot: %p\n", indent, "", s->root);
930 	fprintf(stream, "%*snum_set: 0x%lx\n", indent, "", s->num_set);
931 
932 	if (s->root)
933 		dump_nodes(stream, s->root, indent);
934 }
935 
936 /* Allocates and returns a new sparsebit array. The initial state
937  * of the newly allocated sparsebit array has all bits cleared.
938  */
939 struct sparsebit *sparsebit_alloc(void)
940 {
941 	struct sparsebit *s;
942 
943 	/* Allocate top level structure. */
944 	s = calloc(1, sizeof(*s));
945 	if (!s) {
946 		perror("calloc");
947 		abort();
948 	}
949 
950 	return s;
951 }
952 
953 /* Frees the implementation dependent data for the sparsebit array
954  * pointed to by s and poisons the pointer to that data.
955  */
956 void sparsebit_free(struct sparsebit **sbitp)
957 {
958 	struct sparsebit *s = *sbitp;
959 
960 	if (!s)
961 		return;
962 
963 	sparsebit_clear_all(s);
964 	free(s);
965 	*sbitp = NULL;
966 }
967 
968 /* Makes a copy of the sparsebit array given by s, to the sparsebit
969  * array given by d.  Note, d must have already been allocated via
970  * sparsebit_alloc().  It may already have bits set, which, if
971  * different from those in s, will be cleared.
972  */
973 void sparsebit_copy(struct sparsebit *d, struct sparsebit *s)
974 {
975 	/* First clear any bits already set in the destination */
976 	sparsebit_clear_all(d);
977 
978 	if (s->root) {
979 		d->root = node_copy_subtree(s->root);
980 		d->num_set = s->num_set;
981 	}
982 }
983 
984 /* Returns whether num consecutive bits starting at idx are all set.  */
985 bool sparsebit_is_set_num(struct sparsebit *s,
986 	sparsebit_idx_t idx, sparsebit_num_t num)
987 {
988 	sparsebit_idx_t next_cleared;
989 
990 	assert(num > 0);
991 	assert(idx + num - 1 >= idx);
992 
993 	/* With num > 0, the first bit must be set. */
994 	if (!sparsebit_is_set(s, idx))
995 		return false;
996 
997 	/* Find the next cleared bit */
998 	next_cleared = sparsebit_next_clear(s, idx);
999 
1000 	/*
1001 	 * If no cleared bits beyond idx, then there are at least num
1002 	 * set bits. idx + num doesn't wrap.  Otherwise check if
1003 	 * there are enough set bits between idx and the next cleared bit.
1004 	 */
1005 	return next_cleared == 0 || next_cleared - idx >= num;
1006 }
1007 
1008 /* Returns whether the bit at the index given by idx is cleared.  */
1009 bool sparsebit_is_clear(struct sparsebit *s,
1010 	sparsebit_idx_t idx)
1011 {
1012 	return !sparsebit_is_set(s, idx);
1013 }
1014 
1015 /* Returns whether num consecutive bits starting at idx are all cleared.  */
1016 bool sparsebit_is_clear_num(struct sparsebit *s,
1017 	sparsebit_idx_t idx, sparsebit_num_t num)
1018 {
1019 	sparsebit_idx_t next_set;
1020 
1021 	assert(num > 0);
1022 	assert(idx + num - 1 >= idx);
1023 
1024 	/* With num > 0, the first bit must be cleared. */
1025 	if (!sparsebit_is_clear(s, idx))
1026 		return false;
1027 
1028 	/* Find the next set bit */
1029 	next_set = sparsebit_next_set(s, idx);
1030 
1031 	/*
1032 	 * If no set bits beyond idx, then there are at least num
1033 	 * cleared bits. idx + num doesn't wrap.  Otherwise check if
1034 	 * there are enough cleared bits between idx and the next set bit.
1035 	 */
1036 	return next_set == 0 || next_set - idx >= num;
1037 }
1038 
1039 /* Returns the total number of bits set.  Note: 0 is also returned for
1040  * the case of all bits set.  This is because with all bits set, there
1041  * is 1 additional bit set beyond what can be represented in the return
1042  * value.  Use sparsebit_any_set(), instead of sparsebit_num_set() > 0,
1043  * to determine if the sparsebit array has any bits set.
1044  */
1045 sparsebit_num_t sparsebit_num_set(struct sparsebit *s)
1046 {
1047 	return s->num_set;
1048 }
1049 
1050 /* Returns whether any bit is set in the sparsebit array.  */
1051 bool sparsebit_any_set(struct sparsebit *s)
1052 {
1053 	/*
1054 	 * Nodes only describe set bits.  If any nodes then there
1055 	 * is at least 1 bit set.
1056 	 */
1057 	if (!s->root)
1058 		return false;
1059 
1060 	/*
1061 	 * Every node should have a non-zero mask.  For now, just
1062 	 * assert that the root node has a non-zero mask,
1063 	 * which is a quick check that at least 1 bit is set.
1064 	 */
1065 	assert(s->root->mask != 0);
1066 	assert(s->num_set > 0 ||
1067 	       (s->root->num_after == ((sparsebit_num_t) 0) - MASK_BITS &&
1068 		s->root->mask == ~(mask_t) 0));
1069 
1070 	return true;
1071 }
1072 
1073 /* Returns whether all the bits in the sparsebit array are cleared.  */
1074 bool sparsebit_all_clear(struct sparsebit *s)
1075 {
1076 	return !sparsebit_any_set(s);
1077 }
1078 
1079 /* Returns whether any bit in the sparsebit array is cleared.  */
1080 bool sparsebit_any_clear(struct sparsebit *s)
1081 {
1082 	return !sparsebit_all_set(s);
1083 }
1084 
1085 /* Returns the index of the first set bit.  Aborts if no bits are set.
1086  */
1087 sparsebit_idx_t sparsebit_first_set(struct sparsebit *s)
1088 {
1089 	struct node *nodep;
1090 
1091 	/* Validate at least 1 bit is set */
1092 	assert(sparsebit_any_set(s));
1093 
1094 	nodep = node_first(s);
1095 	return node_first_set(nodep, 0);
1096 }
1097 
1098 /* Returns the index of the first cleared bit.  Aborts if
1099  * no bits are cleared.
1100  */
1101 sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s)
1102 {
1103 	struct node *nodep1, *nodep2;
1104 
1105 	/* Validate at least 1 bit is cleared. */
1106 	assert(sparsebit_any_clear(s));
1107 
1108 	/* If no nodes or first node index > 0 then lowest cleared is 0 */
1109 	nodep1 = node_first(s);
1110 	if (!nodep1 || nodep1->idx > 0)
1111 		return 0;
1112 
1113 	/* Does the mask in the first node contain any cleared bits. */
1114 	if (nodep1->mask != ~(mask_t) 0)
1115 		return node_first_clear(nodep1, 0);
1116 
1117 	/*
1118 	 * All mask bits set in first node.  If there isn't a second node
1119 	 * then the first cleared bit is the first bit after the bits
1120 	 * described by the first node.
1121 	 */
1122 	nodep2 = node_next(s, nodep1);
1123 	if (!nodep2) {
1124 		/*
1125 		 * No second node.  First cleared bit is first bit beyond
1126 		 * bits described by first node.
1127 		 */
1128 		assert(nodep1->mask == ~(mask_t) 0);
1129 		assert(nodep1->idx + MASK_BITS + nodep1->num_after != (sparsebit_idx_t) 0);
1130 		return nodep1->idx + MASK_BITS + nodep1->num_after;
1131 	}
1132 
1133 	/*
1134 	 * There is a second node.
1135 	 * If it is not adjacent to the first node, then there is a gap
1136 	 * of cleared bits between the nodes, and the first cleared bit
1137 	 * is the first bit within the gap.
1138 	 */
1139 	if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx)
1140 		return nodep1->idx + MASK_BITS + nodep1->num_after;
1141 
1142 	/*
1143 	 * Second node is adjacent to the first node.
1144 	 * Because it is adjacent, its mask should be non-zero.  If all
1145 	 * its mask bits were set, then with it being adjacent, those bits
1146 	 * would have been moved into the num_after setting of the previous
1147 	 * node, so its mask must contain at least one cleared bit.
1148 	 */
1149 	return node_first_clear(nodep2, 0);
1150 }
1151 
1152 /* Returns index of next bit set within s after the index given by prev.
1153  * Returns 0 if there are no bits after prev that are set.
1154  */
1155 sparsebit_idx_t sparsebit_next_set(struct sparsebit *s,
1156 	sparsebit_idx_t prev)
1157 {
1158 	sparsebit_idx_t lowest_possible = prev + 1;
1159 	sparsebit_idx_t start;
1160 	struct node *nodep;
1161 
1162 	/* A bit after the highest index can't be set. */
1163 	if (lowest_possible == 0)
1164 		return 0;
1165 
1166 	/*
1167 	 * Find the leftmost 'candidate' overlapping or to the right
1168 	 * of lowest_possible.
1169 	 */
1170 	struct node *candidate = NULL;
1171 
1172 	/* True iff lowest_possible is within candidate */
1173 	bool contains = false;
1174 
1175 	/*
1176 	 * Find node that describes setting of bit at lowest_possible.
1177 	 * If such a node doesn't exist, find the node with the lowest
1178 	 * starting index that is > lowest_possible.
1179 	 */
1180 	for (nodep = s->root; nodep;) {
1181 		if ((nodep->idx + MASK_BITS + nodep->num_after - 1)
1182 			>= lowest_possible) {
1183 			candidate = nodep;
1184 			if (candidate->idx <= lowest_possible) {
1185 				contains = true;
1186 				break;
1187 			}
1188 			nodep = nodep->left;
1189 		} else {
1190 			nodep = nodep->right;
1191 		}
1192 	}
1193 	if (!candidate)
1194 		return 0;
1195 
1196 	assert(candidate->mask != 0);
1197 
1198 	/* Does the candidate node describe the setting of lowest_possible? */
1199 	if (!contains) {
1200 		/*
1201 		 * Candidate doesn't describe setting of bit at lowest_possible.
1202 		 * Candidate points to the first node with a starting index
1203 		 * > lowest_possible.
1204 		 */
1205 		assert(candidate->idx > lowest_possible);
1206 
1207 		return node_first_set(candidate, 0);
1208 	}
1209 
1210 	/*
1211 	 * Candidate describes setting of bit at lowest_possible.
1212 	 * Note: although the node describes the setting of the bit
1213 	 * at lowest_possible, it's possible that its setting and the
1214 	 * settings of all later bits described by this node are 0.
1215 	 * For now, just handle the cases where this node describes
1216 	 * a bit at or after an index of lowest_possible that is set.
1217 	 */
1218 	start = lowest_possible - candidate->idx;
1219 
1220 	if (start < MASK_BITS && candidate->mask >= (1 << start))
1221 		return node_first_set(candidate, start);
1222 
1223 	if (candidate->num_after) {
1224 		sparsebit_idx_t first_num_after_idx = candidate->idx + MASK_BITS;
1225 
1226 		return lowest_possible < first_num_after_idx
1227 			? first_num_after_idx : lowest_possible;
1228 	}
1229 
1230 	/*
1231 	 * Although candidate node describes setting of bit at
1232 	 * the index of lowest_possible, all bits at that index and
1233 	 * later that are described by candidate are cleared.  With
1234 	 * this, the next bit is the first bit in the next node, if
1235 	 * such a node exists.  If a next node doesn't exist, then
1236 	 * there is no next set bit.
1237 	 */
1238 	candidate = node_next(s, candidate);
1239 	if (!candidate)
1240 		return 0;
1241 
1242 	return node_first_set(candidate, 0);
1243 }
1244 
1245 /* Returns index of next bit cleared within s after the index given by prev.
1246  * Returns 0 if there are no bits after prev that are cleared.
1247  */
1248 sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s,
1249 	sparsebit_idx_t prev)
1250 {
1251 	sparsebit_idx_t lowest_possible = prev + 1;
1252 	sparsebit_idx_t idx;
1253 	struct node *nodep1, *nodep2;
1254 
1255 	/* There is no bit after the highest index. */
1256 	if (lowest_possible == 0)
1257 		return 0;
1258 
1259 	/*
1260 	 * Does a node describing the setting of lowest_possible exist?
1261 	 * If not, the bit at lowest_possible is cleared.
1262 	 */
1263 	nodep1 = node_find(s, lowest_possible);
1264 	if (!nodep1)
1265 		return lowest_possible;
1266 
1267 	/* Does a mask bit in node 1 describe the next cleared bit. */
1268 	for (idx = lowest_possible - nodep1->idx; idx < MASK_BITS; idx++)
1269 		if (!(nodep1->mask & (1 << idx)))
1270 			return nodep1->idx + idx;
1271 
1272 	/*
1273 	 * Next cleared bit is not described by node 1.  If there
1274 	 * isn't a next node, then next cleared bit is described
1275 	 * by bit after the bits described by the first node.
1276 	 */
1277 	nodep2 = node_next(s, nodep1);
1278 	if (!nodep2)
1279 		return nodep1->idx + MASK_BITS + nodep1->num_after;
1280 
1281 	/*
1282 	 * There is a second node.
1283 	 * If it is not adjacent to the first node, then there is a gap
1284 	 * of cleared bits between the nodes, and the next cleared bit
1285 	 * is the first bit within the gap.
1286 	 */
1287 	if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx)
1288 		return nodep1->idx + MASK_BITS + nodep1->num_after;
1289 
1290 	/*
1291 	 * Second node is adjacent to the first node.
1292 	 * Because it is adjacent, its mask should be non-zero.  If all
1293 	 * its mask bits were set, then with it being adjacent, those bits
1294 	 * would have been moved into the num_after setting of the previous
1295 	 * node, so its mask must contain at least one cleared bit.
1296 	 */
1297 	return node_first_clear(nodep2, 0);
1298 }
1299 
1300 /* Starting with the index 1 greater than the index given by start, finds
1301  * and returns the index of the first sequence of num consecutively set
1302  * bits.  Returns a value of 0 if no such sequence exists.
1303  */
1304 sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s,
1305 	sparsebit_idx_t start, sparsebit_num_t num)
1306 {
1307 	sparsebit_idx_t idx;
1308 
1309 	assert(num >= 1);
1310 
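	/*
	 * Scan successive runs of set bits.  The loop also terminates when
	 * idx wraps to 0 (no more set bits) or when idx + num - 1 would wrap
	 * past the highest supported index.
	 */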
1311 	for (idx = sparsebit_next_set(s, start);
1312 		idx != 0 && idx + num - 1 >= idx;
1313 		idx = sparsebit_next_set(s, idx)) {
1314 		assert(sparsebit_is_set(s, idx));
1315 
1316 		/*
1317 		 * Does the sequence of bits starting at idx consist of
1318 		 * num set bits?
1319 		 */
1320 		if (sparsebit_is_set_num(s, idx, num))
1321 			return idx;
1322 
1323 		/*
1324 		 * Sequence of set bits at idx isn't large enough.
1325 		 * Skip this entire sequence of set bits.
1326 		 */
1327 		idx = sparsebit_next_clear(s, idx);
1328 		if (idx == 0)
1329 			return 0;
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 /* Starting with the index 1 greater than the index given by start, finds
1336  * and returns the index of the first sequence of num consecutively cleared
1337  * bits.  Returns a value of 0 if no such sequence exists.
1338  */
1339 sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *s,
1340 	sparsebit_idx_t start, sparsebit_num_t num)
1341 {
1342 	sparsebit_idx_t idx;
1343 
1344 	assert(num >= 1);
1345 
1346 	for (idx = sparsebit_next_clear(s, start);
1347 		idx != 0 && idx + num - 1 >= idx;
1348 		idx = sparsebit_next_clear(s, idx)) {
1349 		assert(sparsebit_is_clear(s, idx));
1350 
1351 		/*
1352 		 * Does the sequence of bits starting at idx consist of
1353 		 * num cleared bits?
1354 		 */
1355 		if (sparsebit_is_clear_num(s, idx, num))
1356 			return idx;
1357 
1358 		/*
1359 		 * Sequence of cleared bits at idx isn't large enough.
1360 		 * Skip this entire sequence of cleared bits.
1361 		 */
1362 		idx = sparsebit_next_set(s, idx);
1363 		if (idx == 0)
1364 			return 0;
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 /* Sets the bits in the inclusive range start through start + num - 1.  */
1371 void sparsebit_set_num(struct sparsebit *s,
1372 	sparsebit_idx_t start, sparsebit_num_t num)
1373 {
1374 	struct node *nodep, *next;
1375 	unsigned int n1;
1376 	sparsebit_idx_t idx;
1377 	sparsebit_num_t n;
1378 	sparsebit_idx_t middle_start, middle_end;
1379 
1380 	assert(num > 0);
1381 	assert(start + num - 1 >= start);
1382 
1383 	/*
1384 	 * Leading - bits before first mask boundary.
1385 	 *
1386 	 * TODO(lhuemill): With some effort it may be possible to
1387 	 *   replace the following loop with a straight-line sequence
1388 	 *   of statements.  The high-level sequence would be:
1389 	 *
1390 	 *     1. Use node_split() to force node that describes setting
1391 	 *        of idx to be within the mask portion of a node.
1392 	 *     2. Form mask of bits to be set.
1393 	 *     3. Determine number of mask bits already set in the node
1394 	 *        and store in a local variable named num_already_set.
1395 	 *     4. Set the appropriate mask bits within the node.
1396 	 *     5. Increment the struct sparsebit num_set member
1397 	 *        by the number of bits that were actually set.
1398 	 *        Exclude from the count bits that were already set.
1399 	 *     6. Before returning to the caller, use node_reduce() to
1400 	 *        handle the multiple corner cases that this method
1401 	 *        introduces.
1402 	 */
1403 	for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
1404 		bit_set(s, idx);
1405 
1406 	/* Middle - bits spanning one or more entire masks */
1407 	middle_start = idx;
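	/* (n & -MASK_BITS) rounds n down to a whole number of mask widths. */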
1408 	middle_end = middle_start + (n & -MASK_BITS) - 1;
1409 	if (n >= MASK_BITS) {
1410 		nodep = node_split(s, middle_start);
1411 
1412 		/*
1413 		 * As needed, split just after end of middle bits.
1414 		 * No split needed if end of middle bits is at highest
1415 		 * supported bit index.
1416 		 */
1417 		if (middle_end + 1 > middle_end)
1418 			(void) node_split(s, middle_end + 1);
1419 
1420 		/* Delete nodes that only describe bits within the middle. */
1421 		for (next = node_next(s, nodep);
1422 			next && (next->idx < middle_end);
1423 			next = node_next(s, nodep)) {
1424 			assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end);
1425 			node_rm(s, next);
1426 			next = NULL;
1427 		}
1428 
1429 		/* As needed set each of the mask bits */
1430 		for (n1 = 0; n1 < MASK_BITS; n1++) {
1431 			if (!(nodep->mask & (1 << n1))) {
1432 				nodep->mask |= 1 << n1;
1433 				s->num_set++;
1434 			}
1435 		}
1436 
1437 		s->num_set -= nodep->num_after;
1438 		nodep->num_after = middle_end - middle_start + 1 - MASK_BITS;
1439 		s->num_set += nodep->num_after;
1440 
1441 		node_reduce(s, nodep);
1442 	}
1443 	idx = middle_end + 1;
1444 	n -= middle_end - middle_start + 1;
1445 
1446 	/* Trailing - bits at and beyond last mask boundary */
1447 	assert(n < MASK_BITS);
1448 	for (; n > 0; idx++, n--)
1449 		bit_set(s, idx);
1450 }
1451 
1452 /* Clears the bits in the inclusive range start through start + num - 1.  */
1453 void sparsebit_clear_num(struct sparsebit *s,
1454 	sparsebit_idx_t start, sparsebit_num_t num)
1455 {
1456 	struct node *nodep, *next;
1457 	unsigned int n1;
1458 	sparsebit_idx_t idx;
1459 	sparsebit_num_t n;
1460 	sparsebit_idx_t middle_start, middle_end;
1461 
1462 	assert(num > 0);
1463 	assert(start + num - 1 >= start);
1464 
1465 	/* Leading - bits before first mask boundary */
1466 	for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
1467 		bit_clear(s, idx);
1468 
1469 	/* Middle - bits spanning one or more entire masks */
1470 	middle_start = idx;
1471 	middle_end = middle_start + (n & -MASK_BITS) - 1;
1472 	if (n >= MASK_BITS) {
1473 		nodep = node_split(s, middle_start);
1474 
1475 		/*
1476 		 * As needed, split just after end of middle bits.
1477 		 * No split needed if end of middle bits is at highest
1478 		 * supported bit index.
1479 		 */
1480 		if (middle_end + 1 > middle_end)
1481 			(void) node_split(s, middle_end + 1);
1482 
1483 		/* Delete nodes that only describe bits within the middle. */
1484 		for (next = node_next(s, nodep);
1485 			next && (next->idx < middle_end);
1486 			next = node_next(s, nodep)) {
1487 			assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end);
1488 			node_rm(s, next);
1489 			next = NULL;
1490 		}
1491 
1492 		/* As needed clear each of the mask bits */
1493 		for (n1 = 0; n1 < MASK_BITS; n1++) {
1494 			if (nodep->mask & (1 << n1)) {
1495 				nodep->mask &= ~(1 << n1);
1496 				s->num_set--;
1497 			}
1498 		}
1499 
1500 		/* Clear any bits described by num_after */
1501 		s->num_set -= nodep->num_after;
1502 		nodep->num_after = 0;
1503 
1504 		/*
1505 		 * Delete the node that describes the beginning of
1506 		 * the middle bits and perform any allowed reductions
1507 		 * with the nodes prev or next of nodep.
1508 		 */
1509 		node_reduce(s, nodep);
1510 		nodep = NULL;
1511 	}
1512 	idx = middle_end + 1;
1513 	n -= middle_end - middle_start + 1;
1514 
1515 	/* Trailing - bits at and beyond last mask boundary */
1516 	assert(n < MASK_BITS);
1517 	for (; n > 0; idx++, n--)
1518 		bit_clear(s, idx);
1519 }
1520 
1521 /* Sets the bit at the index given by idx.  */
1522 void sparsebit_set(struct sparsebit *s, sparsebit_idx_t idx)
1523 {
1524 	sparsebit_set_num(s, idx, 1);
1525 }
1526 
1527 /* Clears the bit at the index given by idx.  */
1528 void sparsebit_clear(struct sparsebit *s, sparsebit_idx_t idx)
1529 {
1530 	sparsebit_clear_num(s, idx, 1);
1531 }
1532 
1533 /* Sets the bits in the entire addressable range of the sparsebit array.  */
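/* Note: the work is split into bit 0 plus the remaining 2^64 - 1 bits
 * because a sparsebit_num_t cannot represent a count of 2^64.
 * sparsebit_clear_all() below uses the same two-step approach.
 */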
1534 void sparsebit_set_all(struct sparsebit *s)
1535 {
1536 	sparsebit_set(s, 0);
1537 	sparsebit_set_num(s, 1, ~(sparsebit_idx_t) 0);
1538 	assert(sparsebit_all_set(s));
1539 }
1540 
1541 /* Clears the bits in the entire addressable range of the sparsebit array.  */
1542 void sparsebit_clear_all(struct sparsebit *s)
1543 {
1544 	sparsebit_clear(s, 0);
1545 	sparsebit_clear_num(s, 1, ~(sparsebit_idx_t) 0);
1546 	assert(!sparsebit_any_set(s));
1547 }
1548 
1549 static size_t display_range(FILE *stream, sparsebit_idx_t low,
1550 	sparsebit_idx_t high, bool prepend_comma_space)
1551 {
1552 	char *fmt_str;
1553 	size_t sz;
1554 
1555 	/* Determine the printf format string */
1556 	if (low == high)
1557 		fmt_str = prepend_comma_space ? ", 0x%lx" : "0x%lx";
1558 	else
1559 		fmt_str = prepend_comma_space ? ", 0x%lx:0x%lx" : "0x%lx:0x%lx";
1560 
1561 	/*
1562 	 * When stream is NULL, just determine the size of what would
1563 	 * have been printed, else print the range.
1564 	 */
1565 	if (!stream)
1566 		sz = snprintf(NULL, 0, fmt_str, low, high);
1567 	else
1568 		sz = fprintf(stream, fmt_str, low, high);
1569 
1570 	return sz;
1571 }
1572 
1573 
1574 /* Dumps to the FILE stream given by stream, the bit settings
1575  * of s.  Each line of output is prefixed with the number of
1576  * spaces given by indent.  The length of each line is implementation
1577  * dependent and does not depend on the indent amount.  The following
1578  * is an example output of a sparsebit array that has bits:
1579  *
1580  *   0x5, 0x8, 0xa:0xe, 0x12
1581  *
1582  * This corresponds to a sparsebit whose bits 5, 8, 10, 11, 12, 13, 14, 18
1583  * are set.  Note that a ':', instead of a '-' is used to specify a range of
1584  * contiguous bits.  This is done because '-' is used to specify command-line
1585  * options, and sometimes ranges are specified as command-line arguments.
1586  */
1587 void sparsebit_dump(FILE *stream, struct sparsebit *s,
1588 	unsigned int indent)
1589 {
1590 	size_t current_line_len = 0;
1591 	size_t sz;
1592 	struct node *nodep;
1593 
1594 	if (!sparsebit_any_set(s))
1595 		return;
1596 
1597 	/* Display initial indent */
1598 	fprintf(stream, "%*s", indent, "");
1599 
1600 	/* For each node */
1601 	for (nodep = node_first(s); nodep; nodep = node_next(s, nodep)) {
1602 		unsigned int n1;
1603 		sparsebit_idx_t low, high;
1604 
1605 		/* For each group of bits in the mask */
1606 		for (n1 = 0; n1 < MASK_BITS; n1++) {
1607 			if (nodep->mask & (1 << n1)) {
1608 				low = high = nodep->idx + n1;
1609 
1610 				for (; n1 < MASK_BITS; n1++) {
1611 					if (nodep->mask & (1 << n1))
1612 						high = nodep->idx + n1;
1613 					else
1614 						break;
1615 				}
1616 
1617 				if ((n1 == MASK_BITS) && nodep->num_after)
1618 					high += nodep->num_after;
1619 
1620 				/*
1621 				 * How much room will it take to display
1622 				 * this range.
1623 				 */
1624 				sz = display_range(NULL, low, high,
1625 					current_line_len != 0);
1626 
1627 				/*
1628 				 * If there is not enough room, display
1629 				 * a newline plus the indent of the next
1630 				 * line.
1631 				 */
1632 				if (current_line_len + sz > DUMP_LINE_MAX) {
1633 					fputs("\n", stream);
1634 					fprintf(stream, "%*s", indent, "");
1635 					current_line_len = 0;
1636 				}
1637 
1638 				/* Display the range */
1639 				sz = display_range(stream, low, high,
1640 					current_line_len != 0);
1641 				current_line_len += sz;
1642 			}
1643 		}
1644 
1645 		/*
1646 		 * If num_after is non-zero and the most-significant bit of
1647 		 * mask is not set, then we still need to display a range
1648 		 * for the bits described by num_after.
1649 		 */
1650 		if (!(nodep->mask & (1 << (MASK_BITS - 1))) && nodep->num_after) {
1651 			low = nodep->idx + MASK_BITS;
1652 			high = nodep->idx + MASK_BITS + nodep->num_after - 1;
1653 
1654 			/*
1655 			 * How much room will it take to display
1656 			 * this range.
1657 			 */
1658 			sz = display_range(NULL, low, high,
1659 				current_line_len != 0);
1660 
1661 			/*
1662 			 * If there is not enough room, display
1663 			 * a newline plus the indent of the next
1664 			 * line.
1665 			 */
1666 			if (current_line_len + sz > DUMP_LINE_MAX) {
1667 				fputs("\n", stream);
1668 				fprintf(stream, "%*s", indent, "");
1669 				current_line_len = 0;
1670 			}
1671 
1672 			/* Display the range */
1673 			sz = display_range(stream, low, high,
1674 				current_line_len != 0);
1675 			current_line_len += sz;
1676 		}
1677 	}
1678 	fputs("\n", stream);
1679 }
1680 
1681 /* Validates the internal state of the sparsebit array given by
1682  * s.  On error, diagnostic information is printed to stderr and
1683  * abort is called.
1684  */
1685 void sparsebit_validate_internal(struct sparsebit *s)
1686 {
1687 	bool error_detected = false;
1688 	struct node *nodep, *prev = NULL;
1689 	sparsebit_num_t total_bits_set = 0;
1690 	unsigned int n1;
1691 
1692 	/* For each node */
1693 	for (nodep = node_first(s); nodep;
1694 		prev = nodep, nodep = node_next(s, nodep)) {
1695 
1696 		/*
1697 		 * Increase total bits set by the number of bits set
1698 		 * in this node.
1699 		 */
1700 		for (n1 = 0; n1 < MASK_BITS; n1++)
1701 			if (nodep->mask & (1u << n1))
1702 				total_bits_set++;
1703 
1704 		total_bits_set += nodep->num_after;
1705 
1706 		/*
1707 		 * Whether a mask of 0 is allowed is an arbitrary choice.
1708 		 * For diagnostic purposes it is beneficial to have only
1709 		 * one valid way to represent a given set of bits.  To
1710 		 * support this, an arbitrary choice has been made to not
1711 		 * allow a mask of zero.
1712 		 */
1713 		if (nodep->mask == 0) {
1714 			fprintf(stderr, "Node mask of zero, "
1715 				"nodep: %p nodep->mask: 0x%x",
1716 				nodep, nodep->mask);
1717 			error_detected = true;
1718 			break;
1719 		}
1720 
1721 		/*
1722 		 * Validate that num_after does not exceed the number of
1723 		 * bits that can follow the mask.  The num_after member
1724 		 * uses 0-based indexing and thus has no value that
1725 		 * represents all bits set.  This limitation is handled
1726 		 * by requiring a non-zero mask.  With a non-zero mask,
1727 		 * MASK_BITS worth of bits are described by the mask,
1728 		 * which makes the largest needed num_after equal to:
1729 		 *
1730 		 *    (~(sparsebit_num_t) 0) - MASK_BITS + 1
1731 		 */
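		/*
		 * As a concrete worked value (assuming a 32-bit mask, i.e.
		 * MASK_BITS == 32), that limit is:
		 *
		 *    0xffffffffffffffff - 32 + 1 == 0xffffffffffffffe0
		 */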
1732 		if (nodep->num_after
1733 			> (~(sparsebit_num_t) 0) - MASK_BITS + 1) {
1734 			fprintf(stderr, "num_after too large, "
1735 				"nodep: %p nodep->num_after: 0x%lx",
1736 				nodep, nodep->num_after);
1737 			error_detected = true;
1738 			break;
1739 		}
1740 
1741 		/* Validate node index is divisible by the mask size */
1742 		if (nodep->idx % MASK_BITS) {
1743 			fprintf(stderr, "Node index not divisible by "
1744 				"mask size,\n"
1745 				"  nodep: %p nodep->idx: 0x%lx "
1746 				"MASK_BITS: %lu\n",
1747 				nodep, nodep->idx, MASK_BITS);
1748 			error_detected = true;
1749 			break;
1750 		}
1751 
1752 		/*
1753 		 * Validate bits described by node don't wrap beyond the
1754 		 * highest supported index.
1755 		 */
1756 		if ((nodep->idx + MASK_BITS + nodep->num_after - 1) < nodep->idx) {
1757 			fprintf(stderr, "Bits described by node wrap "
1758 				"beyond highest supported index,\n"
1759 				"  nodep: %p nodep->idx: 0x%lx\n"
1760 				"  MASK_BITS: %lu nodep->num_after: 0x%lx",
1761 				nodep, nodep->idx, MASK_BITS, nodep->num_after);
1762 			error_detected = true;
1763 			break;
1764 		}
1765 
1766 		/* Check parent pointers. */
1767 		if (nodep->left) {
1768 			if (nodep->left->parent != nodep) {
1769 				fprintf(stderr, "Left child parent pointer "
1770 					"doesn't point to this node,\n"
1771 					"  nodep: %p nodep->left: %p "
1772 					"nodep->left->parent: %p",
1773 					nodep, nodep->left,
1774 					nodep->left->parent);
1775 				error_detected = true;
1776 				break;
1777 			}
1778 		}
1779 
1780 		if (nodep->right) {
1781 			if (nodep->right->parent != nodep) {
1782 				fprintf(stderr, "Right child parent pointer "
1783 					"doesn't point to this node,\n"
1784 					"  nodep: %p nodep->right: %p "
1785 					"nodep->right->parent: %p",
1786 					nodep, nodep->right,
1787 					nodep->right->parent);
1788 				error_detected = true;
1789 				break;
1790 			}
1791 		}
1792 
1793 		if (!nodep->parent) {
1794 			if (s->root != nodep) {
1795 				fprintf(stderr, "Unexpected root node, "
1796 					"s->root: %p nodep: %p",
1797 					s->root, nodep);
1798 				error_detected = true;
1799 				break;
1800 			}
1801 		}
1802 
1803 		if (prev) {
1804 			/*
1805 			 * Is index of previous node before index of
1806 			 * current node?
1807 			 */
1808 			if (prev->idx >= nodep->idx) {
1809 				fprintf(stderr, "Previous node index "
1810 					">= current node index,\n"
1811 					"  prev: %p prev->idx: 0x%lx\n"
1812 					"  nodep: %p nodep->idx: 0x%lx",
1813 					prev, prev->idx, nodep, nodep->idx);
1814 				error_detected = true;
1815 				break;
1816 			}
1817 
1818 			/*
1819 			 * Nodes occur in ascending order, based on each
1820 			 * node's starting index, and must not overlap.
1821 			 */
1822 			if ((prev->idx + MASK_BITS + prev->num_after - 1)
1823 				>= nodep->idx) {
1824 				fprintf(stderr, "Previous node bit range "
1825 				"overlaps with current node bit range,\n"
1826 					"  prev: %p prev->idx: 0x%lx "
1827 					"prev->num_after: 0x%lx\n"
1828 					"  nodep: %p nodep->idx: 0x%lx "
1829 					"nodep->num_after: 0x%lx\n"
1830 					"  MASK_BITS: %lu",
1831 					prev, prev->idx, prev->num_after,
1832 					nodep, nodep->idx, nodep->num_after,
1833 					MASK_BITS);
1834 				error_detected = true;
1835 				break;
1836 			}
1837 
1838 			/*
1839 			 * When the node has all mask bits set, it shouldn't
1840 			 * be adjacent to the last bit described by the
1841 			 * previous node.
1842 			 */
1843 			if (nodep->mask == ~(mask_t) 0 &&
1844 			    prev->idx + MASK_BITS + prev->num_after == nodep->idx) {
1845 				fprintf(stderr, "Current node has mask with "
1846 					"all bits set and is adjacent to the "
1847 					"previous node,\n"
1848 					"  prev: %p prev->idx: 0x%lx "
1849 					"prev->num_after: 0x%lx\n"
1850 					"  nodep: %p nodep->idx: 0x%lx "
1851 					"nodep->num_after: 0x%lx\n"
1852 					"  MASK_BITS: %lu",
1853 					prev, prev->idx, prev->num_after,
1854 					nodep, nodep->idx, nodep->num_after,
1855 					MASK_BITS);
1856 
1857 				error_detected = true;
1858 				break;
1859 			}
1860 		}
1861 	}
1862 
1863 	if (!error_detected) {
1864 		/*
1865 		 * Check that the number of bits counted across all nodes
1866 		 * equals the cached total in s->num_set.
1867 		 */
1868 		if (s->num_set != total_bits_set) {
1869 			fprintf(stderr, "Number of bits set mismatch,\n"
1870 				"  s->num_set: 0x%lx total_bits_set: 0x%lx",
1871 				s->num_set, total_bits_set);
1872 
1873 			error_detected = true;
1874 		}
1875 	}
1876 
1877 	if (error_detected) {
1878 		fputs("  dump_internal:\n", stderr);
1879 		sparsebit_dump_internal(stderr, s, 4);
1880 		abort();
1881 	}
1882 }
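
/* Typical use of sparsebit_validate_internal() from a test (a sketch; the
 * surrounding setup is illustrative, not taken from this file):
 *
 *   struct sparsebit *s = sparsebit_alloc();
 *
 *   sparsebit_set_num(s, 0x1000, 0x40);
 *   sparsebit_clear(s, 0x1010);
 *   sparsebit_validate_internal(s);	// aborts if the tree is inconsistent
 *
 *   sparsebit_free(&s);
 */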
1883 
1884 
1885 #ifdef FUZZ
1886 /* A simple but effective fuzzing driver.  Look for bugs with the help
1887  * of some invariants and a trivial reference representation of the
1888  * sparsebit contents.  Just use 512 bytes of /dev/zero and /dev/urandom
1889  * as seed inputs, and let afl-fuzz do the magic. :)
1890  */
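
/* One possible build-and-run recipe (a sketch; the afl tool name, include
 * path and corpus file names below are assumptions, not part of this file):
 *
 *   afl-gcc -DFUZZ -I tools/testing/selftests/kvm/include \
 *           tools/testing/selftests/kvm/lib/sparsebit.c -o sparsebit-fuzz
 *   mkdir corpus
 *   head -c 512 /dev/zero    > corpus/zero
 *   head -c 512 /dev/urandom > corpus/rand
 *   afl-fuzz -i corpus -o findings ./sparsebit-fuzz
 */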
1891 
1892 #include <stdlib.h>
1893 #include <assert.h>
1894 
1895 struct range {
1896 	sparsebit_idx_t first, last;
1897 	bool set;
1898 };
1899 
1900 struct sparsebit *s;
1901 struct range ranges[1000];
1902 int num_ranges;
1903 
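/* Reference model lookup: the most recently recorded range containing idx
 * wins; bits not covered by any range default to clear.
 */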
1904 static bool get_value(sparsebit_idx_t idx)
1905 {
1906 	int i;
1907 
1908 	for (i = num_ranges; --i >= 0; )
1909 		if (ranges[i].first <= idx && idx <= ranges[i].last)
1910 			return ranges[i].set;
1911 
1912 	return false;
1913 }
1914 
1915 static void operate(int code, sparsebit_idx_t first, sparsebit_idx_t last)
1916 {
1917 	sparsebit_num_t num;
1918 	sparsebit_idx_t next;
1919 
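	/* Normalize so that first <= last, with num the length of the range. */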
1920 	if (first < last) {
1921 		num = last - first + 1;
1922 	} else {
1923 		num = first - last + 1;
1924 		first = last;
1925 		last = first + num - 1;
1926 	}
1927 
1928 	switch (code) {
1929 	case 0:
1930 		sparsebit_set(s, first);
1931 		assert(sparsebit_is_set(s, first));
1932 		assert(!sparsebit_is_clear(s, first));
1933 		assert(sparsebit_any_set(s));
1934 		assert(!sparsebit_all_clear(s));
1935 		if (get_value(first))
1936 			return;
1937 		if (num_ranges == 1000)
1938 			exit(0);
1939 		ranges[num_ranges++] = (struct range)
1940 			{ .first = first, .last = first, .set = true };
1941 		break;
1942 	case 1:
1943 		sparsebit_clear(s, first);
1944 		assert(!sparsebit_is_set(s, first));
1945 		assert(sparsebit_is_clear(s, first));
1946 		assert(sparsebit_any_clear(s));
1947 		assert(!sparsebit_all_set(s));
1948 		if (!get_value(first))
1949 			return;
1950 		if (num_ranges == 1000)
1951 			exit(0);
1952 		ranges[num_ranges++] = (struct range)
1953 			{ .first = first, .last = first, .set = false };
1954 		break;
1955 	case 2:
1956 		assert(sparsebit_is_set(s, first) == get_value(first));
1957 		assert(sparsebit_is_clear(s, first) == !get_value(first));
1958 		break;
1959 	case 3:
1960 		if (sparsebit_any_set(s))
1961 			assert(get_value(sparsebit_first_set(s)));
1962 		if (sparsebit_any_clear(s))
1963 			assert(!get_value(sparsebit_first_clear(s)));
1964 		sparsebit_set_all(s);
1965 		assert(!sparsebit_any_clear(s));
1966 		assert(sparsebit_all_set(s));
1967 		num_ranges = 0;
1968 		ranges[num_ranges++] = (struct range)
1969 			{ .first = 0, .last = ~(sparsebit_idx_t)0, .set = true };
1970 		break;
1971 	case 4:
1972 		if (sparsebit_any_set(s))
1973 			assert(get_value(sparsebit_first_set(s)));
1974 		if (sparsebit_any_clear(s))
1975 			assert(!get_value(sparsebit_first_clear(s)));
1976 		sparsebit_clear_all(s);
1977 		assert(!sparsebit_any_set(s));
1978 		assert(sparsebit_all_clear(s));
1979 		num_ranges = 0;
1980 		break;
1981 	case 5:
1982 		next = sparsebit_next_set(s, first);
1983 		assert(next == 0 || next > first);
1984 		assert(next == 0 || get_value(next));
1985 		break;
1986 	case 6:
1987 		next = sparsebit_next_clear(s, first);
1988 		assert(next == 0 || next > first);
1989 		assert(next == 0 || !get_value(next));
1990 		break;
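	/*
	 * Cases 7 and 8 cross-check sparsebit_is_set_num() and
	 * sparsebit_is_clear_num() against the first/next set/clear
	 * query functions.
	 */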
1991 	case 7:
1992 		next = sparsebit_next_clear(s, first);
1993 		if (sparsebit_is_set_num(s, first, num)) {
1994 			assert(next == 0 || next > last);
1995 			if (first)
1996 				next = sparsebit_next_set(s, first - 1);
1997 			else if (sparsebit_any_set(s))
1998 				next = sparsebit_first_set(s);
1999 			else
2000 				return;
2001 			assert(next == first);
2002 		} else {
2003 			assert(sparsebit_is_clear(s, first) || next <= last);
2004 		}
2005 		break;
2006 	case 8:
2007 		next = sparsebit_next_set(s, first);
2008 		if (sparsebit_is_clear_num(s, first, num)) {
2009 			assert(next == 0 || next > last);
2010 			if (first)
2011 				next = sparsebit_next_clear(s, first - 1);
2012 			else if (sparsebit_any_clear(s))
2013 				next = sparsebit_first_clear(s);
2014 			else
2015 				return;
2016 			assert(next == first);
2017 		} else {
2018 			assert(sparsebit_is_set(s, first) || next <= last);
2019 		}
2020 		break;
2021 	case 9:
2022 		sparsebit_set_num(s, first, num);
2023 		assert(sparsebit_is_set_num(s, first, num));
2024 		assert(!sparsebit_is_clear_num(s, first, num));
2025 		assert(sparsebit_any_set(s));
2026 		assert(!sparsebit_all_clear(s));
2027 		if (num_ranges == 1000)
2028 			exit(0);
2029 		ranges[num_ranges++] = (struct range)
2030 			{ .first = first, .last = last, .set = true };
2031 		break;
2032 	case 10:
2033 		sparsebit_clear_num(s, first, num);
2034 		assert(!sparsebit_is_set_num(s, first, num));
2035 		assert(sparsebit_is_clear_num(s, first, num));
2036 		assert(sparsebit_any_clear(s));
2037 		assert(!sparsebit_all_set(s));
2038 		if (num_ranges == 1000)
2039 			exit(0);
2040 		ranges[num_ranges++] = (struct range)
2041 			{ .first = first, .last = last, .set = false };
2042 		break;
2043 	case 11:
2044 		sparsebit_validate_internal(s);
2045 		break;
2046 	default:
2047 		break;
2048 	}
2049 }
2050 
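/* Fetch one byte of fuzzer input from stdin; exit cleanly at end of input. */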
2051 unsigned char get8(void)
2052 {
2053 	int ch;
2054 
2055 	ch = getchar();
2056 	if (ch == EOF)
2057 		exit(0);
2058 	return ch;
2059 }
2060 
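/* Assemble a 64-bit value from eight input bytes, most significant byte first. */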
2061 uint64_t get64(void)
2062 {
2063 	uint64_t x;
2064 
2065 	x = get8();
2066 	x = (x << 8) | get8();
2067 	x = (x << 8) | get8();
2068 	x = (x << 8) | get8();
2069 	x = (x << 8) | get8();
2070 	x = (x << 8) | get8();
2071 	x = (x << 8) | get8();
2072 	return (x << 8) | get8();
2073 }
2074 
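/* Each 17-byte input record encodes an operation code (low 4 bits of the
 * first byte) and two 64-bit values used as bit indices.
 */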
2075 int main(void)
2076 {
2077 	s = sparsebit_alloc();
2078 	for (;;) {
2079 		uint8_t op = get8() & 0xf;
2080 		uint64_t first = get64();
2081 		uint64_t last = get64();
2082 
2083 		operate(op, first, last);
2084 	}
2085 }
2086 #endif
2087