#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;		/* Protects all base state below. */
static extent_tree_t	base_avail_szad;	/* Recycled space, size/address ordered. */
static extent_node_t	*base_nodes;		/* Singly linked list of spare nodes. */
static size_t		base_allocated;		/* Bytes served by base_alloc(). */
static size_t		base_resident;		/* Bytes of touched (resident) pages. */
static size_t		base_mapped;		/* Bytes of mapped base chunks. */

/******************************************************************************/

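/*
 * Pop a spare extent_node_t off the base_nodes free list, or return NULL if
 * none is cached.  The caller must hold base_mtx.
 */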
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}

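/*
 * Return an unused extent_node_t to the base_nodes free list; the first word
 * of the node is reused as the list link.  The caller must hold base_mtx.
 */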
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}

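/*
 * Map a new chunk large enough to satisfy a request of at least minsize
 * bytes, carving an extent_node_t header out of the chunk itself if no spare
 * node is cached.  Returns a node describing the usable region, or NULL on
 * failure.  The caller must hold base_mtx.
 */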
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
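	/* No spare node was cached; carve one from the start of the new chunk. */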
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	extent_node_init(node, NULL, addr, csize, true, true);
	return (node);
}

/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to the nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szad_remove(&base_avail_szad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
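	/*
	 * Carve the allocation off the front of the extent; reinsert any
	 * remainder into the tree, or retire the node to the spare list.
	 */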
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szad_insert(&base_avail_szad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}

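/*
 * Illustrative sketch only, not part of the library: internal consumers (the
 * comment above names radix tree nodes as the motivating case) rely on the
 * demand-zeroed guarantee to skip explicit zeroing.  table_t and nelms below
 * are hypothetical:
 *
 *	table_t *table = (table_t *)base_alloc(tsdn, nelms * sizeof(table_t));
 *	if (table == NULL)
 *		return (true);
 *	assert(table[0].child == NULL);	(no memset() needed; pages are zeroed)
 */

/*
 * Copy a consistent snapshot of the base allocator statistics while holding
 * base_mtx.
 */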
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

	malloc_mutex_lock(tsdn, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsdn, &base_mtx);
}

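/*
 * Initialize the base allocator.  Returns true on error (mutex initialization
 * failure), false on success.
 */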
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	extent_tree_szad_new(&base_avail_szad);
	base_nodes = NULL;

	return (false);
}

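/*
 * Fork handling: base_prefork() acquires base_mtx before fork() so that no
 * other thread holds it at fork time; the postfork functions release it (or,
 * in the child, reinitialize it) afterward.
 */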
void
base_prefork(tsdn_t *tsdn)
{

	malloc_mutex_prefork(tsdn, &base_mtx);
}

void
base_postfork_parent(tsdn_t *tsdn)
{

	malloc_mutex_postfork_parent(tsdn, &base_mtx);
}

void
base_postfork_child(tsdn_t *tsdn)
{

	malloc_mutex_postfork_child(tsdn, &base_mtx);
}