#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}

void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large. */
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_append(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
	}
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}

static void
large_dalloc_junk_impl(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;

static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
	if (config_fill && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;
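
/*
 * The in-place resize helpers below, large_ralloc_no_move_shrink() and
 * large_ralloc_no_move_expand(), return false on success (the extent was
 * resized in place) and true on failure, in which case the caller must fall
 * back to a copying reallocation.
 */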

static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + sz_large_pad,
		    sz_size2index(usize), false, diff, NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}

static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}
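
/*
 * large_ralloc_no_move() tries to satisfy a resize request without moving the
 * allocation: first by expanding in place to usize_max (then usize_min), then
 * by accepting the current size if it already falls in [usize_min, usize_max],
 * and finally by shrinking in place.  It returns false if the extent now
 * satisfies the request, and true if a copying reallocation is required.
 */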

bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    !large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}

void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
	return ret;
}
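
/*
 * Large deallocation is split into a prep phase, which unlinks the extent from
 * the arena's large list (unless the arena is automatic) and optionally
 * junk-fills its contents, and a finish phase, which returns the extent to the
 * arena's dirty extents.
 */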

/*
 * junked_locked indicates whether the extent's data have already been
 * junk-filled, and whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
		/* See comments in arena_bin_slabs_full_insert(). */
		if (!arena_is_auto(arena)) {
			malloc_mutex_lock(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
			malloc_mutex_unlock(tsdn, &arena->large_mtx);
		}
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
		if (!arena_is_auto(arena)) {
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}