#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t	*base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	base_pages_alloc(size_t minsize);

/******************************************************************************/

/*
 * Allocate another chunk's worth (at least minsize bytes) of pages for the
 * base allocator to carve up.  Returns true on failure.
 */
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

/*
 * Allocate metadata memory that is never freed.  Each returned pointer is
 * cacheline-aligned, since sizes are rounded up to cacheline quanta.
 */
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);
	VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

	return (ret);
}

/* Like base_alloc(), but zero the allocation. */
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

/* Allocate an extent node, preferentially reusing one from the free list. */
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
		VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

/* Return a node to the free list for reuse; base memory is never unmapped. */
void
base_node_dealloc(extent_node_t *node)
{

	VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

/* Initialize base allocator state; called once during bootstrap. */
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}
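
/*
 * Usage sketch (illustrative only, compiled out): how jemalloc-internal code
 * is expected to drive this allocator.  base_boot() must run once during
 * bootstrap; afterwards base_alloc()/base_calloc() hand out cacheline-aligned,
 * never-freed metadata memory, and extent nodes are recycled through
 * base_node_alloc()/base_node_dealloc().  The function name below is
 * hypothetical and only assumes the declarations in jemalloc_internal.h.
 */
#if 0
static void
base_usage_sketch(void)
{
	extent_node_t *node;
	void *meta;

	if (base_boot())
		return;			/* Mutex initialization failed. */

	/* Persistent metadata; there is intentionally no base_dalloc(). */
	meta = base_calloc(4, sizeof(void *));
	if (meta == NULL)
		return;

	/* Extent nodes come from the free list when one is available. */
	node = base_node_alloc();
	if (node != NULL)
		base_node_dealloc(node);	/* Push node onto base_nodes. */
}
#endif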