/* contrib/jemalloc/src/rtree.c (FreeBSD, revision cdebaff820b2a4915a16cedfd511823d78aab171) */
#define	JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"

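/* Return the smaller of the two arguments. */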
static unsigned
hmin(unsigned ha, unsigned hb)
{

	return (ha < hb ? ha : hb);
}

/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
bool
rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc)
{
	unsigned bits_in_leaf, height, i;

	assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
	    RTREE_BITS_PER_LEVEL));
	assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));

	bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
	    : (bits % RTREE_BITS_PER_LEVEL);
	if (bits > bits_in_leaf) {
		height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
		if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
			height++;
	} else
		height = 1;
	assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);

	rtree->alloc = alloc;
	rtree->dalloc = dalloc;
	rtree->height = height;

	/* Root level. */
	rtree->levels[0].subtree = NULL;
	rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
	    bits_in_leaf;
	rtree->levels[0].cumbits = rtree->levels[0].bits;
	/* Interior levels. */
	for (i = 1; i < height-1; i++) {
		rtree->levels[i].subtree = NULL;
		rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
		rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
		    RTREE_BITS_PER_LEVEL;
	}
	/* Leaf level. */
	if (height > 1) {
		rtree->levels[height-1].subtree = NULL;
		rtree->levels[height-1].bits = bits_in_leaf;
		rtree->levels[height-1].cumbits = bits;
	}

	/* Compute lookup table to be used by rtree_start_level(). */
	for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
		rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
		    1);
	}

	return (false);
}

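/*
 * Illustrative usage sketch (not part of the original source; the callback
 * names and the 48-bit key width below are arbitrary).  The allocation
 * callback receives an element count and should return zeroed memory, since
 * child pointers are only ever compared against NULL before being set:
 *
 *	static rtree_node_elm_t *
 *	node_alloc(size_t nelms)
 *	{
 *		return (calloc(nelms, sizeof(rtree_node_elm_t)));
 *	}
 *
 *	static void
 *	node_dalloc(rtree_node_elm_t *node)
 *	{
 *		free(node);
 *	}
 *
 *	rtree_t rt;
 *	if (rtree_new(&rt, 48, node_alloc, node_dalloc))
 *		abort();	// false means success.
 *	...
 *	rtree_delete(&rt);	// Tear down all lazily created subtrees.
 */

/*
 * Recursively free node's children (when node is not at the leaf level),
 * then node itself, via the tree's dalloc callback.
 */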
static void
rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{

	if (level + 1 < rtree->height) {
		size_t nchildren, i;

		nchildren = ZU(1) << rtree->levels[level].bits;
		for (i = 0; i < nchildren; i++) {
			rtree_node_elm_t *child = node[i].child;
			if (child != NULL)
				rtree_delete_subtree(rtree, child, level + 1);
		}
	}
	rtree->dalloc(node);
}

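/* Tear down the tree: free each level's lazily created subtree, if any. */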
void
rtree_delete(rtree_t *rtree)
{
	unsigned i;

	for (i = 0; i < rtree->height; i++) {
		rtree_node_elm_t *subtree = rtree->levels[i].subtree;
		if (subtree != NULL)
			rtree_delete_subtree(rtree, subtree, i);
	}
}

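/*
 * Lazily create the node pointed to by *elmp.  The thread that wins the race
 * (the CAS from NULL to RTREE_NODE_INITIALIZING) allocates and publishes the
 * node; any other thread spin-waits until the winner's write becomes visible.
 * Returns NULL if allocation fails.
 */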
static rtree_node_elm_t *
rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
	rtree_node_elm_t *node;

	if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
		spin_t spinner;

		/*
		 * Another thread is already in the process of initializing.
		 * Spin-wait until initialization is complete.
		 */
		spin_init(&spinner);
		do {
			spin_adaptive(&spinner);
			node = atomic_read_p((void **)elmp);
		} while (node == RTREE_NODE_INITIALIZING);
	} else {
		node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
		if (node == NULL)
			return (NULL);
		atomic_write_p((void **)elmp, node);
	}

	return (node);
}

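/*
 * Slow path taken on first access to a level's subtree: create and publish
 * its root node.
 */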
rtree_node_elm_t *
rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{

	return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}

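/*
 * Slow path taken when elm's child pointer has not been initialized yet:
 * create and publish the child node for the next level down.
 */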
rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{

	return (rtree_node_init(rtree, level+1, &elm->child));
}