#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;

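/*
 * Human-readable names for dss_prec_t values, as reported via e.g. the
 * "opt.dss" and "arena.<i>.dss" mallctls.
 */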
const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static atomic_u_t	dss_prec_default = ATOMIC_INIT(
    (unsigned)DSS_PREC_DEFAULT);

/* Base address of the DSS. */
static void		*dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t	dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t	dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t	dss_max;

/******************************************************************************/

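/*
 * Thin wrapper around sbrk(2).  Builds without DSS support must never reach
 * this; callers guard with have_dss/cassert().
 */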
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
	return sbrk(increment);
#else
	not_implemented();
	return NULL;
#endif
}

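/* Read the current default dss precedence (used when creating new arenas). */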
dss_prec_t
extent_dss_prec_get(void) {
	dss_prec_t ret;

	if (!have_dss) {
		return dss_prec_disabled;
	}
	ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
	return ret;
}

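/*
 * Set the default dss precedence.  Returns true on error, i.e. when an
 * attempt is made to enable dss in a build that lacks it.
 */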
bool
extent_dss_prec_set(dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

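/*
 * Acquire exclusive rights to extend the DSS; contending threads spin rather
 * than block.
 */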
static void
extent_dss_extending_start(void) {
	spin_t spinner = SPIN_INITIALIZER;
	while (true) {
		bool expected = false;
		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
			break;
		}
		spin_adaptive(&spinner);
	}
}

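/* Release exclusive rights to extend the DSS. */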
static void
extent_dss_extending_finish(void) {
	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));

	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}

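/*
 * Refresh dss_max from the kernel's current break, and check that a requested
 * fixed address (if any) is still at the DSS edge.
 */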
static void *
extent_dss_max_update(void *new_addr) {
	/*
	 * Get the current end of the DSS as max_cur and ensure that dss_max is
	 * up to date.
	 */
	void *max_cur = extent_dss_sbrk(0);
	if (max_cur == (void *)-1) {
		return NULL;
	}
	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr) {
		return NULL;
	}
	return max_cur;
}

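/*
 * Allocate size bytes (at the given alignment) by extending the DSS.  The
 * basic scheme: refresh dss_max, compute the page-aligned gap needed to
 * satisfy alignment, grow the DSS by gap + size in a single sbrk() call, and
 * retry if a raw sbrk() from outside the allocator raced in between.  The
 * gap, if any, is recycled rather than leaked.
 */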
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
	edata_t *gap;

	cassert(have_dss);
	assert(size > 0);
	assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a large allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0) {
		return NULL;
	}

	gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
	if (gap == NULL) {
		return NULL;
	}

	extent_dss_extending_start();
	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *max_cur = extent_dss_max_update(new_addr);
			if (max_cur == NULL) {
				goto label_oom;
			}

			extent_head_state_t head_state = opt_retain ?
			    EXTENT_IS_HEAD : EXTENT_NOT_HEAD;
			/*
			 * Compute how much page-aligned gap space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			void *gap_addr_page = (void *)(PAGE_CEILING(
			    (uintptr_t)max_cur));
			void *ret = (void *)ALIGNMENT_CEILING(
			    (uintptr_t)gap_addr_page, alignment);
			size_t gap_size_page = (uintptr_t)ret -
			    (uintptr_t)gap_addr_page;
			if (gap_size_page != 0) {
				edata_init(gap, arena_ind_get(arena),
				    gap_addr_page, gap_size_page, false,
				    SC_NSIZES, extent_sn_next(
				    &arena->pa_shard.pac),
				    extent_state_active, false, true,
				    EXTENT_PAI_PAC, head_state);
			}
			/*
			 * Compute the address just past the end of the desired
			 * allocation space.
			 */
			void *dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
				goto label_oom; /* Wrap-around. */
			}
			/* Compute the increment, including subpage bytes. */
			void *gap_addr_subpage = max_cur;
			size_t gap_size_subpage = (uintptr_t)ret -
			    (uintptr_t)gap_addr_subpage;
			intptr_t incr = gap_size_subpage + size;

			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
			    size);

			/* Try to allocate. */
			void *dss_prev = extent_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				atomic_store_p(&dss_max, dss_next,
				    ATOMIC_RELEASE);
				extent_dss_extending_finish();

				if (gap_size_page != 0) {
					ehooks_t *ehooks = arena_get_ehooks(
					    arena);
					extent_dalloc_gap(tsdn,
					    &arena->pa_shard.pac, ehooks, gap);
				} else {
					edata_cache_put(tsdn,
					    &arena->pa_shard.edata_cache, gap);
				}
				if (!*commit) {
					*commit = pages_decommit(ret, size);
				}
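				/*
				 * A throwaway stack edata is sufficient to
				 * drive the purge hook; if forced purging
				 * fails, fall back to memset() to satisfy
				 * *zero.
				 */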
				if (*zero && *commit) {
					edata_t edata = {0};
					ehooks_t *ehooks = arena_get_ehooks(
					    arena);

					edata_init(&edata,
					    arena_ind_get(arena), ret, size,
					    size, false, SC_NSIZES,
					    extent_state_active, false, true,
					    EXTENT_PAI_PAC, head_state);
					if (extent_purge_forced_wrapper(tsdn,
					    ehooks, &edata, 0, size)) {
						memset(ret, 0, size);
					}
				}
				return ret;
			}
			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.
			 */
			if (dss_prev == (void *)-1) {
				/* OOM. */
				atomic_store_b(&dss_exhausted, true,
				    ATOMIC_RELEASE);
				goto label_oom;
			}
		}
	}
label_oom:
	extent_dss_extending_finish();
	edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
	return NULL;
}

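/* Check whether addr lies within the DSS, i.e. in [dss_base, max). */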
static bool
extent_in_dss_helper(void *addr, void *max) {
	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
	    (uintptr_t)max);
}

bool
extent_in_dss(void *addr) {
	cassert(have_dss);

	return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
	    ATOMIC_ACQUIRE));
}

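/*
 * Two extents may be merged only if they are both within the DSS or both
 * outside it.  If both addresses precede dss_base they are necessarily
 * non-DSS, which makes the dss_max load unnecessary.
 */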
bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
	void *max;

	cassert(have_dss);

	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
	    (uintptr_t)dss_base) {
		return true;
	}

	max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
	return (extent_in_dss_helper(addr_a, max) ==
	    extent_in_dss_helper(addr_b, max));
}

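/*
 * Initialize DSS state at boot: record the initial break as dss_base, and
 * mark the DSS exhausted up front if sbrk() is non-functional.
 */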
void
extent_dss_boot(void) {
	cassert(have_dss);

	dss_base = extent_dss_sbrk(0);
	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}

/******************************************************************************/