
Searched refs:slab (Results 1 – 25 of 30) sorted by relevance


/freebsd/contrib/jemalloc/src/
arena.c
62 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
64 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
271 arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { in arena_slab_reg_alloc() argument
273 arena_slab_data_t *slab_data = extent_slab_data_get(slab); in arena_slab_reg_alloc()
276 assert(extent_nfree_get(slab) > 0); in arena_slab_reg_alloc()
280 ret = (void *)((uintptr_t)extent_addr_get(slab) + in arena_slab_reg_alloc()
282 extent_nfree_dec(slab); in arena_slab_reg_alloc()
287 arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, in arena_slab_reg_alloc_batch() argument
289 arena_slab_data_t *slab_data = extent_slab_data_get(slab); in arena_slab_reg_alloc_batch()
291 assert(extent_nfree_get(slab) >= cnt); in arena_slab_reg_alloc_batch()
[all …]
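
The arena.c hits above show jemalloc carving fixed-size regions out of a slab extent: arena_slab_reg_alloc asserts the slab still has free regions, picks a region index (the elided lines consult the per-slab bitmap in slab_data), computes the region's address from the slab base, and decrements the free count. The following is a minimal, self-contained sketch of that pattern with invented names; the real code works on extent_t and jemalloc's bitmap machinery.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical stand-in for a jemalloc slab: one extent divided into
     * equal-size regions, a bitmap of free regions, and a free count. */
    struct toy_slab {
        void     *base;      /* first region in the slab */
        size_t    reg_size;  /* size of each region */
        uint64_t  free_map;  /* bit i set => region i is free (<= 64 regions) */
        unsigned  nfree;     /* number of free regions */
    };

    /* Rough analogue of arena_slab_reg_alloc(): pick a free region, mark it
     * used, and return its address computed from the slab base. */
    static void *
    toy_slab_reg_alloc(struct toy_slab *slab)
    {
        assert(slab->nfree > 0);
        unsigned regind = (unsigned)__builtin_ctzll(slab->free_map); /* lowest free bit */
        slab->free_map &= ~((uint64_t)1 << regind);
        slab->nfree--;
        return (void *)((uintptr_t)slab->base + regind * slab->reg_size);
    }
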
extent.c
105 size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
539 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { in extents_alloc() argument
546 new_addr, size, pad, alignment, slab, szind, zero, commit, false); in extents_alloc()
722 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { in extent_rtree_write_acquired() argument
723 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); in extent_rtree_write_acquired()
726 slab); in extent_rtree_write_acquired()
799 bool slab = extent_slab_get(extent); in extent_register_impl() local
800 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); in extent_register_impl()
801 if (slab) { in extent_register_impl()
894 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, in extent_recycle_extract() argument
[all …]
jemalloc.c
2125 alloc_ctx.slab = (usize in imalloc_body()
2136 alloc_ctx.slab = false; in imalloc_body()
2575 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); in ifree()
2619 alloc_ctx.slab = true; in isfree()
2626 &dbg_ctx.slab); in isfree()
2628 assert(dbg_ctx.slab == alloc_ctx.slab); in isfree()
2633 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); in isfree()
2683 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); in je_realloc()
2810 &alloc_ctx.szind, &alloc_ctx.slab); in free_fastpath()
2813 if (!res || !alloc_ctx.slab) { in free_fastpath()
[all …]
/freebsd/contrib/jemalloc/include/jemalloc/internal/
arena_inlines_b.h
49 if (unlikely(!alloc_ctx->slab)) { in arena_prof_tctx_get()
69 if (unlikely(!alloc_ctx->slab)) { in arena_prof_tctx_set()
249 bool slab; in arena_dalloc_no_tcache() local
251 true, &szind, &slab); in arena_dalloc_no_tcache()
258 assert(slab == extent_slab_get(extent)); in arena_dalloc_no_tcache()
261 if (likely(slab)) { in arena_dalloc_no_tcache()
297 bool slab; in arena_dalloc() local
301 slab = alloc_ctx->slab; in arena_dalloc()
306 (uintptr_t)ptr, true, &szind, &slab); in arena_dalloc()
315 assert(slab == extent_slab_get(extent)); in arena_dalloc()
[all …]
rtree.h
281 rtree_leaf_elm_t *elm, bool slab) { in rtree_leaf_elm_slab_write() argument
287 (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); in rtree_leaf_elm_slab_write()
290 atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); in rtree_leaf_elm_slab_write()
296 rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { in rtree_leaf_elm_write() argument
300 ((uintptr_t)slab); in rtree_leaf_elm_write()
303 rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); in rtree_leaf_elm_write()
315 rtree_leaf_elm_t *elm, szind_t szind, bool slab) { in rtree_leaf_elm_szind_slab_update() argument
316 assert(!slab || szind < SC_NBINS); in rtree_leaf_elm_szind_slab_update()
322 rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); in rtree_leaf_elm_szind_slab_update()
387 extent_t *extent, szind_t szind, bool slab) { in rtree_write() argument
[all …]
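
The rtree.h hits pack the slab flag into the same word as the extent pointer (and the size-class index into the bits above LG_VADDR), so a single radix-tree load can answer both "which extent?" and "is this a slab?" on the free path. Below is a rough illustration of just the low-bit packing trick under the assumption that extents are at least page aligned; the real encoding also folds in szind and goes through atomics.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Pack a "slab?" boolean into bit 0 of an aligned pointer.  Because the
     * pointee is page aligned, bit 0 is otherwise always zero. */
    static inline uintptr_t
    pack_extent_slab(void *extent, bool slab)
    {
        assert(((uintptr_t)extent & 1) == 0);
        return (uintptr_t)extent | (uintptr_t)slab;
    }

    static inline void *
    unpack_extent(uintptr_t bits)
    {
        return (void *)(bits & ~(uintptr_t)1);
    }

    static inline bool
    unpack_slab(uintptr_t bits)
    {
        return (bool)(bits & 1);
    }
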
extent_inlines.h
331 extent_slab_set(extent_t *extent, bool slab) { in extent_slab_set() argument
333 ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); in extent_slab_set()
368 bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, in extent_init() argument
370 assert(addr == PAGE_ADDR2BASE(addr) || !slab); in extent_init()
375 extent_slab_set(extent, slab); in extent_init()
extent_externs.h
40 size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
51 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
arena_structs_b.h
229 bool slab; member
arena_externs.h
33 size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
/freebsd/contrib/unbound/util/storage/
slabhash.c
243 size_t slab, cnt = 0; in count_slabhash_entries() local
245 for(slab=0; slab<sh->size; slab++) { in count_slabhash_entries()
246 lock_quick_lock(&sh->array[slab]->lock); in count_slabhash_entries()
247 cnt += sh->array[slab]->num; in count_slabhash_entries()
248 lock_quick_unlock(&sh->array[slab]->lock); in count_slabhash_entries()
255 size_t slab, cnt = 0, max_collisions = 0; in get_slabhash_stats() local
257 for(slab=0; slab<sh->size; slab++) { in get_slabhash_stats()
258 lock_quick_lock(&sh->array[slab]->lock); in get_slabhash_stats()
259 cnt += sh->array[slab]->num; in get_slabhash_stats()
260 if (max_collisions < sh->array[slab]->max_collisions) { in get_slabhash_stats()
[all …]
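
Unbound's slabhash splits one hash table into sh->size independent sub-tables ("slabs"), each guarded by its own lock, and the hits above simply walk every slab, take its lock, and accumulate counts. A sketch of that walk, using invented struct names in place of the real lruhash/slabhash types from util/storage/slabhash.h:

    #include <stddef.h>
    #include <pthread.h>

    /* Simplified stand-ins for unbound's per-slab table and slabhash. */
    struct toy_subtable {
        pthread_mutex_t lock;
        size_t          num;    /* entries in this sub-table */
    };

    struct toy_slabhash {
        size_t                size;   /* number of slabs */
        struct toy_subtable **array;  /* one sub-table per slab */
    };

    /* Analogue of count_slabhash_entries(): lock each slab in turn and sum. */
    static size_t
    toy_count_entries(struct toy_slabhash *sh)
    {
        size_t slab, cnt = 0;
        for (slab = 0; slab < sh->size; slab++) {
            pthread_mutex_lock(&sh->array[slab]->lock);
            cnt += sh->array[slab]->num;
            pthread_mutex_unlock(&sh->array[slab]->lock);
        }
        return cnt;
    }
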
/freebsd/sys/vm/
uma_int.h
404 slab_tohashslab(uma_slab_t slab) in slab_tohashslab() argument
407 return (__containerof(slab, struct uma_hash_slab, uhs_slab)); in slab_tohashslab()
411 slab_data(uma_slab_t slab, uma_keg_t keg) in slab_data() argument
415 return ((void *)((uintptr_t)slab - keg->uk_pgoff)); in slab_data()
417 return (slab_tohashslab(slab)->uhs_data); in slab_data()
421 slab_item(uma_slab_t slab, uma_keg_t keg, int index) in slab_item() argument
425 data = (uintptr_t)slab_data(slab, keg); in slab_item()
430 slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item) in slab_item_index() argument
434 data = (uintptr_t)slab_data(slab, keg); in slab_item_index()
606 uma_hash_slab_t slab; in hash_sfind() local
[all …]
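
The uma_int.h hits show how UMA converts between an item pointer and its index within a slab: slab_data() locates the slab's item area (either just below the slab header via keg->uk_pgoff, or via the hash-slab's uhs_data), and slab_item()/slab_item_index() then do the forward and inverse address arithmetic. A compact sketch of that arithmetic, assuming items are packed contiguously at a fixed size and using hypothetical types in place of uma_keg_t/uma_slab_t:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical keg/slab pair: the keg holds per-zone constants, the slab
     * records where its item area starts (UMA derives this from the slab
     * header position rather than storing a pointer). */
    struct toy_keg  { size_t item_size; };
    struct toy_slab { void *data; };

    /* Analogue of slab_item(): address of the index'th item in the slab. */
    static void *
    toy_slab_item(struct toy_slab *slab, struct toy_keg *keg, int index)
    {
        uintptr_t data = (uintptr_t)slab->data;
        return (void *)(data + (size_t)index * keg->item_size);
    }

    /* Analogue of slab_item_index(): recover an item's index from its address. */
    static int
    toy_slab_item_index(struct toy_slab *slab, struct toy_keg *keg, void *item)
    {
        uintptr_t data = (uintptr_t)slab->data;
        return (int)(((uintptr_t)item - data) / keg->item_size);
    }
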
uma_core.c
41 * The basic ideas stem from similar slab/zone based allocators whose algorithms
121 * On INVARIANTS builds, the slab contains a second bitset of the same size,
133 * One zone is for slab headers that can represent a larger number of items,
324 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
325 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
349 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
353 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
354 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
386 "UMA may choose larger slab sizes for better efficiency");
389 * Select the slab zon
1286 uma_hash_slab_t slab; hash_expand() local
1593 keg_free_slab(uma_keg_t keg,uma_slab_t slab,int start) keg_free_slab() argument
1637 uma_slab_t slab, tmp; keg_drain_domain() local
1757 uma_slab_t slab; keg_alloc_slab() local
2193 slab_dbg_bits(uma_slab_t slab,uma_keg_t keg) slab_dbg_bits() argument
3949 uma_slab_t slab; keg_first_slab() local
3982 uma_slab_t slab; keg_fetch_free_slab() local
4003 uma_slab_t slab; keg_fetch_slab() local
4078 slab_alloc_item(uma_keg_t keg,uma_slab_t slab) slab_alloc_item() argument
4111 uma_slab_t slab; zone_import() local
4834 slab_free_item(uma_zone_t zone,uma_slab_t slab,void * item) slab_free_item() argument
4868 uma_slab_t slab; zone_release() local
5242 uma_slab_t slab; global() local
5739 uma_slab_t slab; global() local
5804 uma_dbg_alloc(uma_zone_t zone,uma_slab_t slab,void * item) global() argument
5830 uma_dbg_free(uma_zone_t zone,uma_slab_t slab,void * item) global() argument
[all...]
vm_page.h
232 void *slab; member
/freebsd/contrib/unbound/validator/
val_kcache.c
62 kcache->slab = slabhash_create(numtables, start_size, maxmem, in key_cache_create()
65 if(!kcache->slab) { in key_cache_create()
78 slabhash_delete(kcache->slab); in key_cache_delete()
90 slabhash_insert(kcache->slab, k->entry.hash, &k->entry, in key_cache_insert()
116 e = slabhash_lookup(kcache->slab, lookfor.entry.hash, &lookfor, wr); in key_cache_search()
154 return sizeof(*kcache) + slabhash_get_mem(kcache->slab); in key_cache_get_mem()
166 slabhash_remove(kcache->slab, lookfor.entry.hash, &lookfor); in key_cache_remove()
val_kcache.h
56 struct slabhash* slab; member
/freebsd/sys/kern/
kern_malloc.c
469 contigmalloc_size(uma_slab_t slab) in contigmalloc_size() argument
473 KASSERT(IS_CONTIG_MALLOC(slab), in contigmalloc_size()
474 ("%s: called on non-contigmalloc allocation: %p", __func__, slab)); in contigmalloc_size()
475 va = (uintptr_t)slab; in contigmalloc_size()
583 malloc_large_size(uma_slab_t slab) in malloc_large_size() argument
587 va = (uintptr_t)slab; in malloc_large_size()
588 KASSERT(IS_MALLOC_LARGE(slab), in malloc_large_size()
589 ("%s: called on non-malloc_large allocation: %p", __func__, slab)); in malloc_large_size()
909 uma_slab_t slab; in _free() local
920 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab); in _free()
[all …]
/freebsd/contrib/bc/src/
vector.c
500 * Initializes a single slab.
501 * @param s The slab to initialize.
511 * Adds a string to a slab and returns a pointer to it, or NULL if it could not
513 * @param s The slab to add to.
516 * @return A pointer to the new string in the slab, or NULL if it could not
541 bc_slab_free(void* slab) in bc_slab_free() argument
543 free(((BcSlab*) slab)->s); in bc_slab_free()
549 BcSlab* slab; in bc_slabvec_init() local
555 // We always want to have at least one slab. in bc_slabvec_init()
556 slab in bc_slabvec_init()
565 BcSlab slab; bc_slabvec_strdup() local
[all...]
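
bc's "slab" is a different creature from the kernel and jemalloc ones: a fixed-size character buffer that strings are bump-allocated into, with the slab vector growing a new slab whenever the current one cannot fit the string (the vector.c comments above describe exactly that contract, returning NULL on a full slab). A self-contained sketch of the bump allocation under those assumptions; the names are invented and the real BcSlab lives in include/vector.h.

    #include <stdlib.h>
    #include <string.h>

    #define TOY_SLAB_SIZE 4096

    /* A toy slab: a fixed buffer plus a count of bytes already handed out. */
    struct toy_strslab {
        char  *s;     /* backing buffer of TOY_SLAB_SIZE bytes */
        size_t len;   /* bytes of the buffer already used */
    };

    /* Analogue of the "add a string to a slab" helper documented above: copy
     * the string into the slab and return a pointer to the copy, or NULL if
     * the slab is too full to hold it (the caller then starts a new slab). */
    static char *
    toy_strslab_add(struct toy_strslab *slab, const char *str)
    {
        size_t need = strlen(str) + 1;
        if (slab->len + need > TOY_SLAB_SIZE)
            return NULL;
        char *dst = slab->s + slab->len;
        memcpy(dst, str, need);
        slab->len += need;
        return dst;
    }
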
/freebsd/contrib/netbsd-tests/sys/uvm/
t_uvm_physseg.c
474 struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1 in ATF_TC_BODY() local
495 uvm_page_init_fake(slab, npages1 + npages2 + npages3); in ATF_TC_BODY()
514 ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3)); in ATF_TC_BODY()
520 ATF_REQUIRE(pgs < slab || pgs > (slab + npages1 in ATF_TC_BODY()
541 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2 + npages3)); in ATF_TC_BODY() local
577 uvm_page_init_fake(slab, npages1 + npages2 + npages3); in ATF_TC_BODY()
681 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2)); in ATF_TC_BODY() local
698 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
775 struct vm_page *slab, *pgs; in ATF_TC_BODY() local
781 slab = malloc(sizeof(struct vm_page) * npages * 2); in ATF_TC_BODY()
[all …]
t_uvm_physseg_load.c
543 struct vm_page *slab = malloc(sizeof(struct vm_page) * in ATF_TC_BODY() local
553 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
592 struct vm_page *slab = malloc(sizeof(struct vm_page) * in ATF_TC_BODY() local
602 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
641 struct vm_page *slab = malloc(sizeof(struct vm_page) in ATF_TC_BODY() local
651 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
690 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2)); in ATF_TC_BODY() local
699 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
/freebsd/contrib/unbound/daemon/
cachedump.c
126 size_t slab; in dump_rrset_cache() local
128 for(slab=0; slab<r->table.size; slab++) { in dump_rrset_cache()
129 lock_quick_lock(&r->table.array[slab]->lock); in dump_rrset_cache()
130 if(!dump_rrset_lruhash(ssl, r->table.array[slab], in dump_rrset_cache()
132 lock_quick_unlock(&r->table.array[slab]->lock); in dump_rrset_cache()
135 lock_quick_unlock(&r->table.array[slab]->lock); in dump_rrset_cache()
283 size_t slab; in dump_msg_cache() local
285 for(slab=0; slab<sh->size; slab++) { in dump_msg_cache()
286 lock_quick_lock(&sh->array[slab]->lock); in dump_msg_cache()
287 if(!dump_msg_lruhash(ssl, worker, sh->array[slab])) { in dump_msg_cache()
[all …]
stats.c
302 s->svr.key_cache_count = (long long)count_slabhash_entries(worker->env.key_cache->slab); in server_stats_compile()
remote.c
1931 slabhash_traverse(worker->env.key_cache->slab, 1, in do_flush_zone()
2011 slabhash_traverse(worker->env.key_cache->slab, 1, in do_flush_bogus()
2097 slabhash_traverse(worker->env.key_cache->slab, 1, in do_flush_negative()
/freebsd/tools/test/stress2/misc/
uma_zalloc_arg.sh
145 @@ -292,4 +294,143 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
146 BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
293 @@ -427,6 +427,9 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
/freebsd/contrib/bc/include/
vector.h
393 /// A slab for allocating strings.
399 /// How many bytes of the slab are taken.
405 * Frees a slab. This is a destructor.
406 * @param slab The slab as a void pointer.
409 bc_slab_free(void* slab);
412 * Initializes a slab vector.
419 * Duplicates the string using slabs in the slab vector.
420 * @param v The slab vector.
422 * @return A pointer to the duplicated string, owned by the slab vecto
[all...]
/freebsd/sys/conf/
options
968 # the uma slab allocator.
