#ifndef JEMALLOC_INTERNAL_RTREE_H
#define JEMALLOC_INTERNAL_RTREE_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/tsd.h"

/*
 * This radix tree implementation is tailored to the singular purpose of
 * associating metadata with extents that are currently owned by jemalloc.
 *
 *******************************************************************************
 */

/* Number of high insignificant bits. */
#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
/* Number of low insignificant bits. */
#define RTREE_NLIB LG_PAGE
/* Number of significant bits. */
#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
/* Number of levels in radix tree. */
#if RTREE_NSB <= 10
#  define RTREE_HEIGHT 1
#elif RTREE_NSB <= 36
#  define RTREE_HEIGHT 2
#elif RTREE_NSB <= 52
#  define RTREE_HEIGHT 3
#else
#  error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
#  define RTREE_LEAF_COMPACT
#endif
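
/*
 * For example, on a typical LP64 configuration (illustrative values only:
 * LG_SIZEOF_PTR == 3, LG_VADDR == 48, LG_PAGE == 12), RTREE_NHIB == 64 - 48
 * == 16, RTREE_NLIB == 12, and RTREE_NSB == 48 - 12 == 36, so RTREE_HEIGHT ==
 * 2, and RTREE_LEAF_COMPACT is defined as long as LG_CEIL(SC_NSIZES) <= 16.
 */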

typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
	atomic_p_t	child; /* (rtree_{node,leaf}_elm_t *) */
};

typedef struct rtree_metadata_s rtree_metadata_t;
struct rtree_metadata_s {
	szind_t szind;
	extent_state_t state; /* Mirrors edata->state. */
	bool is_head; /* Mirrors edata->is_head. */
	bool slab;
};

typedef struct rtree_contents_s rtree_contents_t;
struct rtree_contents_s {
	edata_t *edata;
	rtree_metadata_t metadata;
};

#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
#define RTREE_LEAF_STATE_SHIFT 2
#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
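/*
 * Assuming MASK(width, shift) expands to a width-bit mask positioned at
 * shift, the state field occupies bits [RTREE_LEAF_STATE_SHIFT,
 * RTREE_LEAF_STATE_SHIFT + RTREE_LEAF_STATE_WIDTH) of the packed metadata,
 * just above the slab (bit 0) and is_head (bit 1) flags.
 */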

struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
	/*
	 * Single pointer-width field containing all of the leaf element
	 * fields.  For example, on a 64-bit x64 system with 48 significant
	 * virtual memory address bits, the szind index, edata, state, is_head,
	 * and slab fields are packed as follows:
	 *
	 * x: index
	 * e: edata
	 * s: state
	 * h: is_head
	 * b: slab
	 *
	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
	 */
	atomic_p_t	le_bits;
#else
	atomic_p_t	le_edata; /* (edata_t *) */
	/*
	 * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
	 */
	atomic_u_t	le_metadata;
#endif
};

typedef struct rtree_level_s rtree_level_t;
struct rtree_level_s {
	/* Number of key bits distinguished by this level. */
	unsigned		bits;
	/*
	 * Cumulative number of key bits distinguished by traversing to
	 * corresponding tree level.
	 */
	unsigned		cumbits;
};

typedef struct rtree_s rtree_t;
struct rtree_s {
	base_t			*base;
	malloc_mutex_t		init_lock;
	/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
	rtree_node_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#else
	rtree_leaf_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
#endif
};
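
/*
 * With the illustrative RTREE_NSB == 36, RTREE_HEIGHT == 2 configuration
 * above, the embedded root level has 1U << 18 == 262144 elements (one
 * pointer each); deeper levels are created on demand via
 * rtree_leaf_elm_lookup_hard() with init_missing.
 */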

/*
 * Split the bits into one to three partitions depending on number of
 * significant bits.  If the number of bits does not divide evenly into the
 * number of levels, place one remainder bit per level starting at the leaf
 * level.
 */
static const rtree_level_t rtree_levels[] = {
#if RTREE_HEIGHT == 1
	{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 2
	{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
	{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
#elif RTREE_HEIGHT == 3
	{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
	{RTREE_NSB/3 + RTREE_NSB%3/2,
	    RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
	{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
#else
#  error Unsupported rtree height
#endif
};
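
/*
 * E.g. with the illustrative values above (RTREE_NSB == 36, RTREE_NHIB == 16,
 * RTREE_HEIGHT == 2), this yields {18, 34} for the root level and {18, 52}
 * for the leaf level: each level consumes 18 key bits, and 52 of the 64 key
 * bits (16 insignificant + 36 significant) have been consumed by the time
 * the leaf level is reached.
 */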

bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);

rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);

JEMALLOC_ALWAYS_INLINE unsigned
rtree_leaf_maskbits(void) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
	    rtree_levels[RTREE_HEIGHT-1].bits);
	return ptrbits - cumbits;
}
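
/*
 * Continuing the illustrative configuration: ptrbits == 64 and the leaf
 * level's (cumbits - bits) == 52 - 18 == 34, so rtree_leaf_maskbits() == 30:
 * keys that differ only in their low 30 bits fall within the same leaf, and
 * the remaining high bits identify the leaf itself.
 */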

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leafkey(uintptr_t key) {
	uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
	return (key & mask);
}

JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
	return (size_t)((key >> rtree_leaf_maskbits()) &
	    (RTREE_CTX_NCACHE - 1));
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(uintptr_t key, unsigned level) {
	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
	unsigned cumbits = rtree_levels[level].cumbits;
	unsigned shiftbits = ptrbits - cumbits;
	unsigned maskbits = rtree_levels[level].bits;
	uintptr_t mask = (ZU(1) << maskbits) - 1;
	return ((key >> shiftbits) & mask);
}
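
/*
 * Under the same assumptions, rtree_leafkey(key) clears the low 30 bits of
 * key, rtree_cache_direct_map(key) uses bits [30, 30 + lg(RTREE_CTX_NCACHE))
 * as a direct-mapped cache index (RTREE_CTX_NCACHE being a power of two), and
 * rtree_subkey(key, RTREE_HEIGHT-1) == (key >> 12) & 0x3ffff, i.e. the 18-bit
 * index of the page within its leaf.
 */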

/*
 * Atomic getters.
 *
 * dependent: Reading a value on behalf of a pointer to a valid allocation
 *            is guaranteed to be a clean read even without synchronization,
 *            because the rtree update became visible in memory before the
 *            pointer came into existence.
 * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
 *             dependent on a previous rtree write, which means a stale read
 *             could result if synchronization were omitted here.
 */
#  ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
	return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
	uintptr_t edata_bits = (uintptr_t)contents.edata
	    & (((uintptr_t)1 << LG_VADDR) - 1);

	uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
	uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
	uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
	uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
	    RTREE_LEAF_STATE_SHIFT;
	uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
	    slab_bits;
	assert((edata_bits & metadata_bits) == 0);

	return edata_bits | metadata_bits;
}
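
/*
 * A worked example with hypothetical values (LG_VADDR == 48): encoding
 * edata == (edata_t *)0x7f0000400000, szind == 5, state == 1, is_head == true,
 * slab == false yields
 *   (5 << 48) | 0x7f0000400000 | (1 << 2) | (1 << 1) | 0 == 0x00057f0000400006,
 * matching the "xxxxxxxx eeee...e00ssshb" layout documented above; edata
 * alignment guarantees that the low bits borrowed for metadata are zero in
 * the pointer itself.
 */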

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_bits_decode(uintptr_t bits) {
	rtree_contents_t contents;
	/* Do the easy things first. */
	contents.metadata.szind = bits >> LG_VADDR;
	contents.metadata.slab = (bool)(bits & 1);
	contents.metadata.is_head = (bool)(bits & (1 << 1));

	uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT;
	assert(state_bits <= extent_state_max);
	contents.metadata.state = (extent_state_t)state_bits;

	uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
#    ifdef __aarch64__
	/*
	 * aarch64 doesn't sign extend the highest virtual address bit to set
	 * the higher ones.  Instead, the high bits get zeroed.
	 */
	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
	/* Mask off metadata. */
	uintptr_t mask = high_bit_mask & low_bit_mask;
	contents.edata = (edata_t *)(bits & mask);
#    else
	/* Restore sign-extended high bits, mask metadata bits. */
	contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
	    >> RTREE_NHIB) & low_bit_mask);
#    endif
	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
	return contents;
}

#  endif /* RTREE_LEAF_COMPACT */

JEMALLOC_ALWAYS_INLINE rtree_contents_t
rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
	rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
	return contents;
#else
	rtree_contents_t contents;
	unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
	contents.metadata.slab = (bool)(metadata_bits & 1);
	contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));

	uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
	    RTREE_LEAF_STATE_SHIFT;
	assert(state_bits <= extent_state_max);
	contents.metadata.state = (extent_state_t)state_bits;
	contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
	    RTREE_LEAF_STATE_WIDTH);

	contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);

	return contents;
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_contents_encode(rtree_contents_t contents, void **bits,
    unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
	*bits = (void *)rtree_leaf_elm_bits_encode(contents);
#else
	*additional = (unsigned)contents.metadata.slab
	    | ((unsigned)contents.metadata.is_head << 1)
	    | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
	    | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
	    RTREE_LEAF_STATE_WIDTH));
	*bits = contents.edata;
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
	atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
	atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
	/*
	 * Write edata last, since the element is atomically considered valid
	 * as soon as the edata field is non-NULL.
	 */
	atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}

JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm, rtree_contents_t contents) {
	assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
	void *bits;
	unsigned additional;

	rtree_contents_encode(contents, &bits, &additional);
	rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}

/* The state field can be updated independently (and more frequently). */
JEMALLOC_ALWAYS_INLINE void
rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
    rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
	assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
	    /* dependent */ true);
	bits &= ~RTREE_LEAF_STATE_MASK;
	bits |= state << RTREE_LEAF_STATE_SHIFT;
	atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
	if (elm2 != NULL) {
		atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
	}
#else
	unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
	bits &= ~RTREE_LEAF_STATE_MASK;
	bits |= state << RTREE_LEAF_STATE_SHIFT;
	atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
	if (elm2 != NULL) {
		atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
	}
#endif
}

/*
 * Tries to look up the key in the L1 cache, returning false on a hit and true
 * on a miss.  The key is allowed to be 0, in which case true (a miss) is
 * returned.
 */
JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_leaf_elm_t **elm) {
	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
		return true;
	}

	rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
	assert(leaf != NULL);
	uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
	*elm = &leaf[subkey];

	return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing) {
	assert(key != 0);
	assert(!dependent || !init_missing);

	size_t slot = rtree_cache_direct_map(key);
	uintptr_t leafkey = rtree_leafkey(key);
	assert(leafkey != RTREE_LEAFKEY_INVALID);

	/* Fast path: L1 direct mapped cache. */
	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
		assert(leaf != NULL);
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
		return &leaf[subkey];
	}
	/*
	 * Search the L2 LRU cache.  On hit, swap the matching element into the
	 * slot in L1 cache, and move the position in L2 up by 1.
	 */
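	/*
	 * For instance, a hit at l2_cache[3] promotes that leaf into the L1
	 * slot, moves the evicted L1 entry to l2_cache[2], and shifts the old
	 * l2_cache[2] entry down to l2_cache[3]; a hit at l2_cache[0] simply
	 * swaps with the L1 slot.
	 */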
#define RTREE_CACHE_CHECK_L2(i) do {					\
	if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) {	\
		rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf;	\
		assert(leaf != NULL);					\
		if (i > 0) {						\
			/* Bubble up by one. */				\
			rtree_ctx->l2_cache[i].leafkey =		\
				rtree_ctx->l2_cache[i - 1].leafkey;	\
			rtree_ctx->l2_cache[i].leaf =			\
				rtree_ctx->l2_cache[i - 1].leaf;	\
			rtree_ctx->l2_cache[i - 1].leafkey =		\
			    rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[i - 1].leaf =		\
			    rtree_ctx->cache[slot].leaf;		\
		} else {						\
			rtree_ctx->l2_cache[0].leafkey =		\
			    rtree_ctx->cache[slot].leafkey;		\
			rtree_ctx->l2_cache[0].leaf =			\
			    rtree_ctx->cache[slot].leaf;		\
		}							\
		rtree_ctx->cache[slot].leafkey = leafkey;		\
		rtree_ctx->cache[slot].leaf = leaf;			\
		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);	\
		return &leaf[subkey];					\
	}								\
} while (0)
	/* Check the first cache entry. */
	RTREE_CACHE_CHECK_L2(0);
	/* Search the remaining cache elements. */
	for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
		RTREE_CACHE_CHECK_L2(i);
	}
#undef RTREE_CACHE_CHECK_L2

	return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
	    dependent, init_missing);
}

/*
 * Returns true on lookup failure.
 */
static inline bool
rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_contents_t *r_contents) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ false, /* init_missing */ false);
	if (elm == NULL) {
		return true;
	}
	*r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ false);
	return false;
}

static inline rtree_contents_t
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}

static inline rtree_metadata_t
rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	return rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).metadata;
}

/*
 * Returns true when the request cannot be fulfilled by the fast path.
 */
static inline bool
rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
	rtree_leaf_elm_t *elm;
	/*
	 * Check the bool return value (lookup success or not) rather than
	 * elm == NULL, which would cost an extra branch: when the cache
	 * lookup succeeds, the returned pointer is never NULL, but the
	 * compiler does not know that.
	 */
	if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
		return true;
	}
	assert(elm != NULL);
	*r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).metadata;
	return false;
}

JEMALLOC_ALWAYS_INLINE void
rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
	assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
	/*
	 * Only used for emap_(de)register_interior, which implies the
	 * boundaries have been registered already.  Therefore all the lookups
	 * are dependent w/o init_missing, assuming the range spans at most 2
	 * rtree leaf nodes (each covers 1 GiB of vaddr).
	 */
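	/*
	 * (With the illustrative 18-bit leaf level and 4 KiB pages, one leaf
	 * covers 1U << (18 + 12) bytes == 1 GiB.)
	 */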
	void *bits;
	unsigned additional;
	rtree_contents_encode(contents, &bits, &additional);

	rtree_leaf_elm_t *elm = NULL; /* Dead store. */
	for (uintptr_t addr = base; addr <= end; addr += PAGE) {
		if (addr == base ||
		    (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
			elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
			    /* dependent */ true, /* init_missing */ false);
			assert(elm != NULL);
		}
		assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
		    /* dependent */ true, /* init_missing */ false));
		assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
		    /* dependent */ true).edata != NULL);
		rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
		elm++;
	}
}

JEMALLOC_ALWAYS_INLINE void
rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end, rtree_contents_t contents) {
	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
	    /* clearing */ false);
}

JEMALLOC_ALWAYS_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    rtree_contents_t contents) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ false, /* init_missing */ true);
	if (elm == NULL) {
		return true;
	}

	rtree_leaf_elm_write(tsdn, rtree, elm, contents);

	return false;
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
	    key, /* dependent */ true, /* init_missing */ false);
	assert(elm != NULL);
	assert(rtree_leaf_elm_read(tsdn, rtree, elm,
	    /* dependent */ true).edata != NULL);
	rtree_contents_t contents;
	contents.edata = NULL;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = (extent_state_t)0;
	rtree_leaf_elm_write(tsdn, rtree, elm, contents);
}

static inline void
rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t base, uintptr_t end) {
	rtree_contents_t contents;
	contents.edata = NULL;
	contents.metadata.szind = SC_NSIZES;
	contents.metadata.slab = false;
	contents.metadata.is_head = false;
	contents.metadata.state = (extent_state_t)0;
	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
	    /* clearing */ true);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */