xref: /freebsd/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h (revision c43cad87172039ccf38172129c79755ea79e6102)
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/util.h"

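/*
 * Return whether the calling thread's tcache is currently enabled, as
 * recorded in its thread-specific data.
 */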
static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}

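/*
 * Enable or disable the calling thread's tcache.  Enabling initializes the
 * per-thread cache-bin data; disabling tears it down.  The enabled flag is
 * committed only after those calls, since they inspect the current state,
 * and the slow-path flags are then refreshed via tsd_slow_update().
 */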
static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}

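/*
 * Return true if the cache bin for the given small size class is disabled,
 * i.e. its maximum capacity is zero.  A disabled small bin must never have
 * held pointers, so its stack head still points at the preceding junk
 * sentinel.
 */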
JEMALLOC_ALWAYS_INLINE bool
tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
	assert(ind < SC_NBINS);
	bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
	if (ret && bin != NULL) {
		/* Small size class but cache bin disabled. */
		assert(ind >= nhbins);
		assert((uintptr_t)(*bin->stack_head) ==
		    cache_bin_preceding_junk);
	}

	return ret;
}

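/*
 * Allocate a small-size-class object, preferring the thread cache.  On a
 * cache miss the bin is refilled from the arena (or, if the bin is disabled,
 * the request falls through to arena_malloc_hard()), and the result is
 * zeroed here if requested.
 */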
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t binind, bool zero, bool slow_path) {
	void *ret;
	bool tcache_success;

	assert(binind < SC_NBINS);
	cache_bin_t *bin = &tcache->bins[binind];
	ret = cache_bin_alloc(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}
		if (unlikely(tcache_small_bin_disabled(binind, bin))) {
			/* Stats and zero are handled directly by the arena. */
			return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
			    binind, zero);
		}
		tcache_bin_flush_stashed(tsd, tcache, bin, binind,
		    /* is_small */ true);

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    bin, binind, &tcache_hard_success);
		if (!tcache_hard_success) {
			return NULL;
		}
	}

	assert(ret);
	if (unlikely(zero)) {
		size_t usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
		memset(ret, 0, usize);
	}
	if (config_stats) {
		bin->tstats.nrequests++;
	}
	return ret;
}

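/*
 * Allocate a large-size-class object from the thread cache if possible.  On
 * a cache miss a single large object is allocated directly from the arena;
 * the bin is not refilled, since unused large objects are expensive to keep
 * around.
 */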
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	bool tcache_success;

	assert(binind >= SC_NBINS && binind < nhbins);
	cache_bin_t *bin = &tcache->bins[binind];
	ret = cache_bin_alloc(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}
		tcache_bin_flush_stashed(tsd, tcache, bin, binind,
		    /* is_small */ false);

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		if (unlikely(zero)) {
			size_t usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
			memset(ret, 0, usize);
		}

		if (config_stats) {
			bin->tstats.nrequests++;
		}
	}

	return ret;
}

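/*
 * Illustrative sketch only, not part of the upstream header: how a caller
 * might dispatch between the small and large tcache paths above.  The real
 * fast-path dispatch lives in the arena inline wrappers (arena_malloc() and
 * friends); this helper merely shows the split at SC_SMALL_MAXCLASS and the
 * tcache_maxclass upper bound.
 */
static inline void *
tcache_alloc_dispatch_sketch(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t binind, bool zero, bool slow_path) {
	if (likely(size <= SC_SMALL_MAXCLASS)) {
		/* binind is a small size class index (< SC_NBINS). */
		return tcache_alloc_small(tsd, arena, tcache, size, binind,
		    zero, slow_path);
	}
	/* Sizes above tcache_maxclass must bypass the tcache entirely. */
	assert(size <= tcache_maxclass);
	return tcache_alloc_large(tsd, arena, tcache, size, binind, zero,
	    slow_path);
}

/*
 * Return a small allocation to the thread cache.  Pointers at the non-fast
 * alignment are junk-filled unconditionally and, when the bin has room,
 * stashed rather than made immediately reusable.  Otherwise the object is
 * pushed onto the cache bin, first flushing the bin down to a fraction of
 * its capacity if it is full (or deallocating directly through the arena if
 * the bin is disabled).
 */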
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);

	cache_bin_t *bin = &tcache->bins[binind];
	/*
	 * Not marking the branch unlikely: this runs after free_fastpath()
	 * (which handles the most common cases), so the "uncommon" cases are
	 * no longer rare by the time we get here.
	 */
	if (cache_bin_nonfast_aligned(ptr)) {
		/* Junk unconditionally, even if bin is full. */
		san_junk_ptr(ptr, sz_index2size(binind));
		if (cache_bin_stash(bin, ptr)) {
			return;
		}
		assert(cache_bin_full(bin));
		/* Bin full; fall through into the flush branch. */
	}

	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
		if (unlikely(tcache_small_bin_disabled(binind, bin))) {
			arena_dalloc_small(tsd_tsdn(tsd), ptr);
			return;
		}
		cache_bin_sz_t max = cache_bin_info_ncached_max(
		    &tcache_bin_info[binind]);
		unsigned remain = max >> opt_lg_tcache_flush_small_div;
		tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
		bool ret = cache_bin_dalloc_easy(bin, ptr);
		assert(ret);
	}
}

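/*
 * Return a large allocation to the thread cache, first flushing the bin down
 * to a fraction of its capacity if it is full.
 */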
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	cache_bin_t *bin = &tcache->bins[binind];
	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
		unsigned remain = cache_bin_info_ncached_max(
		    &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
		tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
		bool ret = cache_bin_dalloc_easy(bin, ptr);
		assert(ret);
	}
}

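/*
 * Look up an explicitly created tcache by index (as handed out by the
 * "tcache.create" mallctl).  An invalid id aborts the process; an entry
 * marked for reinitialization is lazily recreated.
 */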
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
		abort();
	} else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */