#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;
/* Size/address-ordered tree of extents available for reuse. */
static extent_tree_t	base_avail_szad;
/* Head of an intrusive LIFO free list of recycled extent_node_t's. */
static extent_node_t	*base_nodes;
/* Statistics, all protected by base_mtx. */
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/

/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
{
	extent_node_t *node;

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}
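
/*
 * The two functions above implement an intrusive LIFO free list: a cached
 * node stores the pointer to the next free node in its own first word, so
 * the list consumes no memory beyond the nodes themselves.  Illustrative
 * round trip (a sketch, not library code; base_mtx must be held):
 *
 *	base_node_dalloc(node);			(push node onto base_nodes)
 *	assert(base_node_try_alloc() == node);	(LIFO pop returns it)
 */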

/* base_mtx must be held. */
static extent_node_t *
base_chunk_alloc(size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	assert(minsize != 0);
	node = base_node_try_alloc();
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}
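
/*
 * Worked example for base_chunk_alloc() (a sketch; the numbers assume a
 * 64-byte cacheline and 2 MiB chunks, both of which are configuration-
 * dependent): with no recycled node available and minsize == 512, nsize ==
 * CACHELINE_CEILING(sizeof(extent_node_t)) might be 128, so csize ==
 * CHUNK_CEILING(512 + 128) == 2 MiB.  The node header is carved off the
 * front of the new mapping, and the remaining csize - nsize bytes are
 * recorded in the node as available space.
 */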

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);
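	/*
	 * E.g., with a 64-byte cacheline (configuration-dependent),
	 * CACHELINE_CEILING(40) == 64, so distinct base allocations never
	 * share a cacheline.
	 */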

	usize = s2u(csize);
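	/*
	 * s2u() rounds csize up to the smallest size class that contains it;
	 * the tree search below therefore looks for an extent of at least
	 * that (size-class-rounded) size, even though only csize bytes are
	 * carved out of whatever extent is found.
	 */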
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(&base_mtx);
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
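	/*
	 * Resident accounting example (assuming 4 KiB pages): a 64-byte
	 * allocation whose ret is page-aligned adds one PAGE above, while a
	 * following 64-byte allocation at ret + 64 adds nothing, because it
	 * crosses no page boundary.
	 */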
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(&base_mtx);
	return (ret);
}
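
/*
 * Illustrative use of base_alloc() (a sketch; my_table_t is hypothetical):
 * callers that bootstrap allocator metadata rely on the demand-zeroed
 * guarantee and can skip zero-initialization, e.g.:
 *
 *	my_table_t *t = (my_table_t *)base_alloc(sizeof(my_table_t));
 *	if (t == NULL)
 *		return (true);
 *
 * All of t's fields start out zero, so only non-zero fields need explicit
 * initialization.
 */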

void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{

	malloc_mutex_lock(&base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(&base_mtx);
}

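/*
 * Illustrative caller of base_stats_get() (a sketch; the locals are
 * hypothetical):
 *
 *	size_t allocated, resident, mapped;
 *	base_stats_get(&allocated, &resident, &mapped);
 *	assert(allocated <= resident && resident <= mapped);
 */
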
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}
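
/*
 * Like other jemalloc-internal boot functions, base_boot() returns true on
 * error and false on success, so callers treat a true return as a failed
 * initialization.
 */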

void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}
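
/*
 * The prefork/postfork hooks above keep base_mtx consistent across fork():
 * base_prefork() acquires the mutex so that no other thread can hold it at
 * the moment of the fork, base_postfork_parent() releases it in the parent,
 * and base_postfork_child() restores it to a usable state in the child,
 * where only the forking thread survives.
 */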