#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/spin.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;

const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/*
 * Current dss precedence default, used when creating new arenas. NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static atomic_u_t dss_prec_default = ATOMIC_INIT(
    (unsigned)DSS_PREC_DEFAULT);

/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether a thread is currently extending DSS. */
static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static atomic_p_t dss_max;

/******************************************************************************/

static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
	return sbrk(increment);
#else
	not_implemented();
	return NULL;
#endif
}

dss_prec_t
extent_dss_prec_get(void) {
	dss_prec_t ret;

	if (!have_dss) {
		return dss_prec_disabled;
	}
	ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
	return ret;
}

bool
extent_dss_prec_set(dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

static void
extent_dss_extending_start(void) {
	spin_t spinner = SPIN_INITIALIZER;
	while (true) {
		bool expected = false;
		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
			break;
		}
		spin_adaptive(&spinner);
	}
}

static void
extent_dss_extending_finish(void) {
	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));

	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
}

static void *
extent_dss_max_update(void *new_addr) {
	/*
	 * Get the current end of the DSS as max_cur and assure that dss_max is
	 * up to date.
	 */
	void *max_cur = extent_dss_sbrk(0);
	if (max_cur == (void *)-1) {
		return NULL;
	}
	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr) {
		return NULL;
	}
	return max_cur;
}

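/*
 * Allocate a size-byte extent from the DSS at the requested alignment.  Any
 * page-aligned space skipped over to satisfy the alignment is tracked in a
 * gap extent so that it can be recycled later.  Returns NULL if the DSS is
 * exhausted, if sbrk() fails, or if new_addr is non-NULL and does not match
 * the current DSS end.
 */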
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit) {
	extent_t *gap;

	cassert(have_dss);
	assert(size > 0);
	assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a large allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0) {
		return NULL;
	}

	gap = extent_alloc(tsdn, arena);
	if (gap == NULL) {
		return NULL;
	}

	extent_dss_extending_start();
	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *max_cur = extent_dss_max_update(new_addr);
			if (max_cur == NULL) {
				goto label_oom;
			}

			/*
			 * Compute how much page-aligned gap space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			void *gap_addr_page = (void *)(PAGE_CEILING(
			    (uintptr_t)max_cur));
			void *ret = (void *)ALIGNMENT_CEILING(
			    (uintptr_t)gap_addr_page, alignment);
			size_t gap_size_page = (uintptr_t)ret -
			    (uintptr_t)gap_addr_page;
			if (gap_size_page != 0) {
				extent_init(gap, arena, gap_addr_page,
				    gap_size_page, false, SC_NSIZES,
				    arena_extent_sn_next(arena),
				    extent_state_active, false, true, true,
				    EXTENT_NOT_HEAD);
			}
			/*
			 * Compute the address just past the end of the desired
			 * allocation space.
			 */
			void *dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
				goto label_oom; /* Wrap-around. */
			}
			/* Compute the increment, including subpage bytes. */
			void *gap_addr_subpage = max_cur;
			size_t gap_size_subpage = (uintptr_t)ret -
			    (uintptr_t)gap_addr_subpage;
			intptr_t incr = gap_size_subpage + size;

			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
			    size);

			/* Try to allocate. */
			void *dss_prev = extent_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				atomic_store_p(&dss_max, dss_next,
				    ATOMIC_RELEASE);
				extent_dss_extending_finish();

				if (gap_size_page != 0) {
					extent_dalloc_gap(tsdn, arena, gap);
				} else {
					extent_dalloc(tsdn, arena, gap);
				}
				if (!*commit) {
					*commit = pages_decommit(ret, size);
				}
				if (*zero && *commit) {
					extent_hooks_t *extent_hooks =
					    EXTENT_HOOKS_INITIALIZER;
					extent_t extent;

					extent_init(&extent, arena, ret, size,
					    size, false, SC_NSIZES,
					    extent_state_active, false, true,
					    true, EXTENT_NOT_HEAD);
					if (extent_purge_forced_wrapper(tsdn,
					    arena, &extent_hooks, &extent, 0,
					    size)) {
						memset(ret, 0, size);
					}
				}
				return ret;
			}
			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.
			 */
			if (dss_prev == (void *)-1) {
				/* OOM. */
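				/*
				 * Record the failure so that subsequent calls
				 * observe dss_exhausted and fail without
				 * issuing further sbrk() requests.
				 */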
				atomic_store_b(&dss_exhausted, true,
				    ATOMIC_RELEASE);
				goto label_oom;
			}
		}
	}
label_oom:
	extent_dss_extending_finish();
	extent_dalloc(tsdn, arena, gap);
	return NULL;
}

static bool
extent_in_dss_helper(void *addr, void *max) {
	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
	    (uintptr_t)max);
}

bool
extent_in_dss(void *addr) {
	cassert(have_dss);

	return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
	    ATOMIC_ACQUIRE));
}

bool
extent_dss_mergeable(void *addr_a, void *addr_b) {
	void *max;

	cassert(have_dss);

	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
	    (uintptr_t)dss_base) {
		return true;
	}

	max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
	return (extent_in_dss_helper(addr_a, max) ==
	    extent_in_dss_helper(addr_b, max));
}

void
extent_dss_boot(void) {
	cassert(have_dss);

	dss_base = extent_dss_sbrk(0);
	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}

/******************************************************************************/