#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

#ifndef JEMALLOC_ZONE
#  error "This source file is for zones on Darwin (OS X)."
#endif

/*
 * Definitions of the following structs in malloc/malloc.h might be too old
 * for the built binary to run on newer versions of OSX. So use the newest
 * possible version of those structs.
 */
typedef struct _malloc_zone_t {
	void *reserved1;
	void *reserved2;
	size_t (*size)(struct _malloc_zone_t *, const void *);
	void *(*malloc)(struct _malloc_zone_t *, size_t);
	void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
	void *(*valloc)(struct _malloc_zone_t *, size_t);
	void (*free)(struct _malloc_zone_t *, void *);
	void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
	void (*destroy)(struct _malloc_zone_t *);
	const char *zone_name;
	unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
	void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
	struct malloc_introspection_t *introspect;
	unsigned version;
	void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
	void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
	size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;
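/*
 * The version member above tells libmalloc which of the trailing function
 * pointers it may safely call; zone_init() below sets it to 9, which covers
 * everything up to pressure_relief and the introspection reinit_lock hook
 * (see the comment in zone_reinit_lock).
 */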

typedef struct {
	vm_address_t address;
	vm_size_t size;
} vm_range_t;

typedef struct malloc_statistics_t {
	unsigned blocks_in_use;
	size_t size_in_use;
	size_t max_size_in_use;
	size_t size_allocated;
} malloc_statistics_t;

typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);

typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);

typedef struct malloc_introspection_t {
	kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
	size_t (*good_size)(malloc_zone_t *, size_t);
	boolean_t (*check)(malloc_zone_t *);
	void (*print)(malloc_zone_t *, boolean_t);
	void (*log)(malloc_zone_t *, void *);
	void (*force_lock)(malloc_zone_t *);
	void (*force_unlock)(malloc_zone_t *);
	void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
	boolean_t (*zone_locked)(malloc_zone_t *);
	boolean_t (*enable_discharge_checking)(malloc_zone_t *);
	boolean_t (*disable_discharge_checking)(malloc_zone_t *);
	void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
	void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
#else
	void *enumerate_unavailable_without_blocks;
#endif
	void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;

extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);

extern malloc_zone_t *malloc_default_zone(void);

extern void malloc_zone_register(malloc_zone_t *zone);

extern void malloc_zone_unregister(malloc_zone_t *zone);

/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

/******************************************************************************/
/* Data. */

static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t	zone_size(malloc_zone_t *zone, const void *ptr);
static void	*zone_malloc(malloc_zone_t *zone, size_t size);
static void	*zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void	*zone_valloc(malloc_zone_t *zone, size_t size);
static void	zone_free(malloc_zone_t *zone, void *ptr);
static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
static void	zone_destroy(malloc_zone_t *zone);
static unsigned	zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
    void **results, unsigned num_requested);
static void	zone_batch_free(struct _malloc_zone_t *zone,
    void **to_be_freed, unsigned num_to_be_freed);
static size_t	zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t	zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder);
static boolean_t	zone_check(malloc_zone_t *zone);
static void	zone_print(malloc_zone_t *zone, boolean_t verbose);
static void	zone_log(malloc_zone_t *zone, void *address);
static void	zone_force_lock(malloc_zone_t *zone);
static void	zone_force_unlock(malloc_zone_t *zone);
static void	zone_statistics(malloc_zone_t *zone,
    malloc_statistics_t *stats);
static boolean_t	zone_locked(malloc_zone_t *zone);
static void	zone_reinit_lock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, const void *ptr) {
	/*
	 * There appear to be places within Darwin (such as setenv(3)) that
	 * cause calls to this function with pointers that *no* zone owns.  If
	 * we knew that all pointers were owned by *some* zone, we could split
	 * our zone into two parts, and use one as the default allocator and
	 * the other as the default deallocator/reallocator.  Since that will
	 * not work in practice, we must check all pointers to ensure that they
	 * reside within a mapped extent before determining size.
	 */
	return ivsalloc(tsdn_fetch(), ptr);
}

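/*
 * The allocation entry points can forward to jemalloc unconditionally; only
 * the deallocation/reallocation paths below need the ownership checks
 * described in zone_size above.
 */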
static void *
zone_malloc(malloc_zone_t *zone, size_t size) {
	return je_malloc(size);
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
	return je_calloc(num, size);
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size) {
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	je_posix_memalign(&ret, PAGE, size);

	return ret;
}

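/*
 * Pointers that jemalloc does not own (ivsalloc() returns 0) must fall
 * through to the system free(), since Darwin can present this zone with
 * pointers it never allocated (see the comment in zone_size).
 */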
static void
zone_free(malloc_zone_t *zone, void *ptr) {
	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
		je_free(ptr);
		return;
	}

	free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
		return je_realloc(ptr, size);
	}

	return realloc(ptr, size);
}

static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	je_posix_memalign(&ret, alignment, size);

	return ret;
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
	size_t alloc_size;

	alloc_size = ivsalloc(tsdn_fetch(), ptr);
	if (alloc_size != 0) {
		assert(alloc_size == size);
		je_free(ptr);
		return;
	}

	free(ptr);
}

static void
zone_destroy(malloc_zone_t *zone) {
	/* This function should never be called. */
	not_reached();
}

static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
    unsigned num_requested) {
	unsigned i;

	for (i = 0; i < num_requested; i++) {
		results[i] = je_malloc(size);
		if (!results[i])
			break;
	}

	return i;
}

static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
    unsigned num_to_be_freed) {
	unsigned i;

	for (i = 0; i < num_to_be_freed; i++) {
		zone_free(zone, to_be_freed[i]);
		to_be_freed[i] = NULL;
	}
}

static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
	return 0;
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size) {
	if (size == 0) {
		size = 1;
	}
	return sz_s2u(size);
}

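/*
 * The introspection hooks below are deliberately minimal: jemalloc does not
 * support zone enumeration, so the enumerator reports success without
 * recording any ranges, and check/print/log/statistics are no-ops or return
 * fixed values.
 */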
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder) {
	return KERN_SUCCESS;
}

static boolean_t
zone_check(malloc_zone_t *zone) {
	return true;
}

static void
zone_print(malloc_zone_t *zone, boolean_t verbose) {
}

static void
zone_log(malloc_zone_t *zone, void *address) {
}

static void
zone_force_lock(malloc_zone_t *zone) {
	if (isthreaded) {
		/*
		 * See the note in zone_force_unlock, below, to see why we need
		 * this.
		 */
		assert(zone_force_lock_pid == -1);
		zone_force_lock_pid = getpid();
		jemalloc_prefork();
	}
}

static void
zone_force_unlock(malloc_zone_t *zone) {
	/*
	 * zone_force_lock and zone_force_unlock are the entry points to the
	 * forking machinery on OS X.  The tricky thing is, the child is not
	 * allowed to unlock mutexes locked in the parent, even if owned by the
	 * forking thread (and the mutex type we use in OS X will fail an assert
	 * if we try).  In the child, we can get away with reinitializing all
	 * the mutexes, which has the effect of unlocking them.  In the parent,
	 * doing this would mean we wouldn't wake any waiters blocked on the
	 * mutexes we unlock.  So, we record the pid of the current thread in
	 * zone_force_lock, and use that to detect if we're in the parent or
	 * child here, to decide which unlock logic we need.
	 */
	if (isthreaded) {
		assert(zone_force_lock_pid != -1);
		if (getpid() == zone_force_lock_pid) {
			jemalloc_postfork_parent();
		} else {
			jemalloc_postfork_child();
		}
		zone_force_lock_pid = -1;
	}
}

static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
	/* We make no effort to actually fill the values. */
	stats->blocks_in_use = 0;
	stats->size_in_use = 0;
	stats->max_size_in_use = 0;
	stats->size_allocated = 0;
}

static boolean_t
zone_locked(malloc_zone_t *zone) {
	/* Pretend no lock is being held. */
	return false;
}

static void
zone_reinit_lock(malloc_zone_t *zone) {
	/*
	 * As of OSX 10.12, this function is only used when force_unlock would
	 * be used if the zone version were < 9.  So just use force_unlock.
	 */
	zone_force_unlock(zone);
}

static void
zone_init(void) {
	jemalloc_zone.size = zone_size;
	jemalloc_zone.malloc = zone_malloc;
	jemalloc_zone.calloc = zone_calloc;
	jemalloc_zone.valloc = zone_valloc;
	jemalloc_zone.free = zone_free;
	jemalloc_zone.realloc = zone_realloc;
	jemalloc_zone.destroy = zone_destroy;
	jemalloc_zone.zone_name = "jemalloc_zone";
	jemalloc_zone.batch_malloc = zone_batch_malloc;
	jemalloc_zone.batch_free = zone_batch_free;
	jemalloc_zone.introspect = &jemalloc_zone_introspect;
	jemalloc_zone.version = 9;
	jemalloc_zone.memalign = zone_memalign;
	jemalloc_zone.free_definite_size = zone_free_definite_size;
	jemalloc_zone.pressure_relief = zone_pressure_relief;

	jemalloc_zone_introspect.enumerator = zone_enumerator;
	jemalloc_zone_introspect.good_size = zone_good_size;
	jemalloc_zone_introspect.check = zone_check;
	jemalloc_zone_introspect.print = zone_print;
	jemalloc_zone_introspect.log = zone_log;
	jemalloc_zone_introspect.force_lock = zone_force_lock;
	jemalloc_zone_introspect.force_unlock = zone_force_unlock;
	jemalloc_zone_introspect.statistics = zone_statistics;
	jemalloc_zone_introspect.zone_locked = zone_locked;
	jemalloc_zone_introspect.enable_discharge_checking = NULL;
	jemalloc_zone_introspect.disable_discharge_checking = NULL;
	jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
	jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}

static malloc_zone_t *
zone_default_get(void) {
	malloc_zone_t **zones = NULL;
	unsigned int num_zones = 0;

	/*
	 * On OSX 10.12, malloc_default_zone returns a special zone that is not
	 * present in the list of registered zones. That zone uses a "lite zone"
	 * if one is present (apparently enabled when malloc stack logging is
	 * enabled), or the first registered zone otherwise. In practice this
	 * means unless malloc stack logging is enabled, the first registered
	 * zone is the default.  So get the list of zones to get the first one,
	 * instead of relying on malloc_default_zone.
	 */
	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
	    (vm_address_t**)&zones, &num_zones)) {
		/*
		 * Reset the value in case the failure happened after it was
		 * set.
		 */
		num_zones = 0;
	}

	if (num_zones) {
		return zones[0];
	}

	return malloc_default_zone();
}

/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void) {
	malloc_zone_t *zone;

	do {
		/*
		 * Unregister and reregister the default zone.  On OSX >= 10.6,
		 * unregistering takes the last registered zone and places it
		 * at the location of the specified zone.  Unregistering the
		 * default zone thus makes the last registered one the default.
		 * On OSX < 10.6, unregistering shifts all registered zones.
		 * The first registered zone then becomes the default.
		 */
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);

		/*
		 * On OSX 10.6, having the default purgeable zone appear before
		 * the default zone makes some things crash because it thinks it
		 * owns the default zone allocated pointers.  We thus
		 * unregister/re-register it in order to ensure it's always
		 * after the default zone.  On OSX < 10.6, there is no purgeable
		 * zone, so this does nothing.  On OSX >= 10.6, unregistering
		 * replaces the purgeable zone with the last registered zone
		 * above, i.e. the default zone.  Registering it again then puts
		 * it at the end, obviously after the default zone.
		 */
		if (purgeable_zone != NULL) {
			malloc_zone_unregister(purgeable_zone);
			malloc_zone_register(purgeable_zone);
		}

		zone = zone_default_get();
	} while (zone != &jemalloc_zone);
}

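/*
 * A worked example of one zone_promote() iteration under the OSX >= 10.6
 * semantics, assuming the zones were registered in the order
 * [default, purgeable, jemalloc_zone] (with more registered zones, the loop
 * simply repeats until jemalloc_zone reaches slot 0):
 *
 *	unregister(default)   -> [jemalloc_zone, purgeable]
 *	register(default)     -> [jemalloc_zone, purgeable, default]
 *	unregister(purgeable) -> [jemalloc_zone, default]
 *	register(purgeable)   -> [jemalloc_zone, default, purgeable]
 *
 * zone_default_get() now returns jemalloc_zone, so the loop terminates.
 */
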
JEMALLOC_ATTR(constructor)
void
zone_register(void) {
	/*
	 * If something else replaced the system default zone allocator, don't
	 * register jemalloc's.
	 */
	default_zone = zone_default_get();
	if (!default_zone->zone_name || strcmp(default_zone->zone_name,
	    "DefaultMallocZone") != 0) {
		return;
	}

	/*
	 * The default purgeable zone is created lazily by OSX's libc.  It uses
	 * the default zone when it is created for "small" allocations
	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
	 * obviously fails when the default zone is the jemalloc zone, so
	 * malloc_default_purgeable_zone() is called beforehand so that the
	 * default purgeable zone is created when the default zone is still
	 * a scalable_zone.  As purgeable zones only exist on >= 10.6, we need
	 * to check for the existence of malloc_default_purgeable_zone() at
	 * run time.
	 */
	purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
	    malloc_default_purgeable_zone();

	/* Register the custom zone.  At this point it won't be the default. */
	zone_init();
	malloc_zone_register(&jemalloc_zone);

	/* Promote the custom zone to be default. */
	zone_promote();
}
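
/*
 * Usage sketch (illustrative, not part of the library): because
 * zone_register() runs as a constructor and promotes jemalloc_zone to be the
 * default zone, a program that merely links against jemalloc on Darwin
 * allocates through it with no source changes:
 *
 *	void *p = malloc(42);	// dispatched to zone_malloc -> je_malloc
 *	free(p);		// dispatched to zone_free
 */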