
Searched refs:slab (Results 1 – 25 of 34) sorted by relevance

/freebsd/contrib/unbound/testcode/
unitinfra.c
66 static void test_keep_probing(struct infra_cache* slab, in test_keep_probing() argument
79 slab->infra_keep_probing = keep_probing; in test_keep_probing()
83 unit_assert( infra_host(slab, &one, onelen, zone, zonelen, in test_keep_probing()
88 unit_assert( infra_rtt_update(slab, &one, onelen, zone, zonelen, in test_keep_probing()
90 unit_assert( infra_host(slab, &one, onelen, zone, zonelen, in test_keep_probing()
97 unit_assert( (d=infra_lookup_host(slab, &one, onelen, zone, zonelen, 0, *now, &k)) ); in test_keep_probing()
108 unit_assert( infra_get_lame_rtt(slab, &one, onelen, zone, zonelen, in test_keep_probing()
121 struct infra_cache* slab; in infra_test() local
135 slab = infra_create(cfg); in infra_test()
137 unit_assert( infra_host(slab, &one, onelen, zone, zonelen, now, in infra_test()
[all …]
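
The matches above come from unbound's unit test for the infrastructure cache: infra_create() builds the cache, the test flips the infra_keep_probing option, and the results of infra_host(), infra_rtt_update(), and infra_get_lame_rtt() are asserted. As a hedged illustration of the behavior under test (whether a host whose backed-off timeout estimate has crossed the cutoff should still be probed), here is a self-contained toy model; the struct, names, and numbers are invented and are not unbound's API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy cutoff; illustrative only, not unbound's actual constant. */
    #define TOY_TOP_TIMEOUT_MS 120000

    struct toy_host {
        int  rtt_ms;        /* backed-off timeout estimate */
        bool keep_probing;  /* like the infra_keep_probing knob above */
    };

    static void toy_timeout(struct toy_host *h)
    {
        h->rtt_ms *= 2;     /* exponential backoff on every timeout */
    }

    /* A host past the cutoff is treated as down, unless the
     * keep-probing option still allows occasional probes. */
    static bool toy_host_usable(const struct toy_host *h)
    {
        return h->rtt_ms < TOY_TOP_TIMEOUT_MS || h->keep_probing;
    }

    int main(void)
    {
        struct toy_host h = { 376, true };
        while (h.rtt_ms < TOY_TOP_TIMEOUT_MS)
            toy_timeout(&h);
        printf("rtt=%d usable=%d\n", h.rtt_ms, toy_host_usable(&h));
        return 0;
    }
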
/freebsd/contrib/jemalloc/src/
arena.c
65 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
227 arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) { in arena_slab_reg_alloc()
229 slab_data_t *slab_data = edata_slab_data_get(slab); in arena_slab_reg_alloc()
232 assert(edata_nfree_get(slab) > 0); in arena_slab_reg_alloc()
236 ret = (void *)((uintptr_t)edata_addr_get(slab) + in arena_slab_reg_alloc()
238 edata_nfree_dec(slab); in arena_slab_reg_alloc()
243 arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info, in arena_slab_reg_alloc_batch()
245 slab_data_t *slab_data = edata_slab_data_get(slab); in arena_slab_reg_alloc_batch()
247 assert(edata_nfree_get(slab) >= cnt); in arena_slab_reg_alloc_batch()
254 *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) in arena_slab_reg_alloc_batch()
271 arena_slab_reg_alloc(extent_t * slab,const bin_info_t * bin_info) arena_slab_reg_alloc() argument
287 arena_slab_reg_alloc_batch(extent_t * slab,const bin_info_t * bin_info,unsigned cnt,void ** ptrs) arena_slab_reg_alloc_batch() argument
338 arena_slab_regind(extent_t * slab,szind_t binind,const void * ptr) arena_slab_regind() argument
359 arena_slab_reg_dalloc(extent_t * slab,arena_slab_data_t * slab_data,void * ptr) arena_slab_reg_dalloc() argument
1005 arena_slab_dalloc(tsdn_t * tsdn,arena_t * arena,extent_t * slab) arena_slab_dalloc() argument
1013 arena_bin_slabs_nonfull_insert(bin_t * bin,extent_t * slab) arena_bin_slabs_nonfull_insert() argument
1022 arena_bin_slabs_nonfull_remove(bin_t * bin,extent_t * slab) arena_bin_slabs_nonfull_remove() argument
1031 extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); arena_bin_slabs_nonfull_tryget() local
1043 arena_bin_slabs_full_insert(arena_t * arena,bin_t * bin,extent_t * slab) arena_bin_slabs_full_insert() argument
1057 arena_bin_slabs_full_remove(arena_t * arena,bin_t * bin,extent_t * slab) arena_bin_slabs_full_remove() argument
1066 extent_t *slab; arena_bin_reset() local
1209 extent_t *slab; arena_slab_alloc_hard() local
1238 extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, arena_slab_alloc() local
1268 extent_t *slab; arena_bin_nonfull_slab_get() local
1312 extent_t *slab; arena_bin_malloc_hard() local
1396 extent_t *slab; arena_tcache_fill_small() local
1464 extent_t *slab; arena_malloc_small() local
1622 arena_dissociate_bin_slab(arena_t * arena,extent_t * slab,bin_t * bin) arena_dissociate_bin_slab() argument
1644 arena_dalloc_bin_slab(tsdn_t * tsdn,arena_t * arena,extent_t * slab,bin_t * bin) arena_dalloc_bin_slab() argument
1659 arena_bin_lower_slab(tsdn_t * tsdn,arena_t * arena,extent_t * slab,bin_t * bin) arena_bin_lower_slab() argument
1687 arena_dalloc_bin_locked_impl(tsdn_t * tsdn,arena_t * arena,bin_t * bin,szind_t binind,extent_t * slab,void * ptr,bool junked) arena_dalloc_bin_locked_impl() argument
[all...]
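
arena_slab_reg_alloc() above hands out one region from a slab: it consults the slab's free-region bitmap, turns the first free index into an address (slab base plus index times region size), and decrements the slab's free count; arena_slab_reg_alloc_batch() repeats this cnt times. A minimal self-contained sketch of that bitmap technique, with invented types, a single 64-bit bitmap word, and __builtin_ffsll assumed available (GCC/Clang):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy slab: 64 fixed-size regions tracked in one bitmap word. */
    struct toy_slab {
        char    *base;      /* start of the slab's memory */
        size_t   reg_size;  /* size of each region */
        unsigned nfree;     /* regions still free */
        uint64_t free_bits; /* bit i set => region i free */
    };

    static void *toy_slab_reg_alloc(struct toy_slab *slab)
    {
        assert(slab->nfree > 0);
        int idx = __builtin_ffsll((long long)slab->free_bits) - 1;
        slab->free_bits &= ~(UINT64_C(1) << idx);   /* mark allocated */
        slab->nfree--;
        return slab->base + (size_t)idx * slab->reg_size;
    }

    int main(void)
    {
        static char mem[64 * 32];
        struct toy_slab s = { mem, 32, 64, ~UINT64_C(0) };
        void *r0 = toy_slab_reg_alloc(&s);
        void *r1 = toy_slab_reg_alloc(&s);
        printf("r0=%p r1=%p nfree=%u\n", r0, r1, s.nfree);
        return 0;
    }
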
extent.c
329 /* slab */ false)) { in extents_stats_add()
1010 size, /* slab */ false, SC_NSIZES, extent_sn_next(pac), in extent_split_interior()
1197 /* slab */ false, SC_NSIZES, edata_sn_get(edata), in extent_recycle()
539 extents_alloc(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,extents_t * extents,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool * zero,bool * commit) extents_alloc() argument
722 extent_rtree_write_acquired(tsdn_t * tsdn,rtree_leaf_elm_t * elm_a,rtree_leaf_elm_t * elm_b,extent_t * extent,szind_t szind,bool slab) extent_rtree_write_acquired() argument
799 bool slab = extent_slab_get(extent); extent_register_impl() local
894 extent_recycle_extract(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,rtree_ctx_t * rtree_ctx,extents_t * extents,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,bool growing_retained) extent_recycle_extract() argument
986 extent_split_interior(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,rtree_ctx_t * rtree_ctx,extent_t ** extent,extent_t ** lead,extent_t ** trail,extent_t ** to_leak,extent_t ** to_salvage,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool growing_retained) extent_split_interior() argument
1059 extent_recycle_split(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,rtree_ctx_t * rtree_ctx,extents_t * extents,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,extent_t * extent,bool growing_retained) extent_recycle_split() argument
1130 extent_recycle(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,extents_t * extents,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool * zero,bool * commit,bool growing_retained) extent_recycle() argument
1299 extent_grow_retained(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool * zero,bool * commit) extent_grow_retained() argument
1465 extent_alloc_retained(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool * zero,bool * commit) extent_alloc_retained() argument
1494 extent_alloc_wrapper_hard(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool * zero,bool * commit) extent_alloc_wrapper_hard() argument
1533 extent_alloc_wrapper(tsdn_t * tsdn,arena_t * arena,extent_hooks_t ** r_extent_hooks,void * new_addr,size_t size,size_t pad,size_t alignment,bool slab,szind_t szind,bool * zero,bool * commit) extent_alloc_wrapper() argument
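
extent_split_interior() above carves a recycled extent into an optional lead (to satisfy alignment), the requested range, and an optional trail, with pieces that cannot be re-inserted routed to to_leak/to_salvage. The address arithmetic behind such a split, as a standalone sketch with invented names (it assumes the aligned request fits inside the extent):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* How a [base, base+size) extent splits around an alignment-rounded
     * allocation.  Illustrative arithmetic only. */
    static void toy_split_interior(uintptr_t base, size_t size,
        size_t alloc_size, size_t alignment, size_t *lead, size_t *trail)
    {
        uintptr_t addr = (base + alignment - 1) & ~(uintptr_t)(alignment - 1);
        *lead = (size_t)(addr - base);      /* unused space before */
        assert(*lead + alloc_size <= size); /* request must fit */
        *trail = size - *lead - alloc_size; /* unused space after */
    }

    int main(void)
    {
        size_t lead, trail;
        toy_split_interior(0x10001000, 0x8000, 0x2000, 0x4000, &lead, &trail);
        printf("lead=0x%zx trail=0x%zx\n", lead, trail); /* 0x3000, 0x3000 */
        return 0;
    }
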
jemalloc.c
1805 * malloc_conf_init(), since any slab size tweaking will need to be done in malloc_init_hard()
2559 alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
2565 alloc_ctx.slab = false; in ifree()
2916 if (alloc_ctx->slab != dbg_ctx.slab) { in JEMALLOC_ATTR()
2919 "mismatch in slab bit"); in JEMALLOC_ATTR()
2942 alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
2947 * usize can be trusted to determine szind and slab.
2950 alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
2956 /* Small alloc may have !slab (sample
[all...]
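
The jemalloc.c matches show the free path deriving the slab bit from the usable size: small sizes (usize <= SC_SMALL_MAXCLASS, equivalently szind < SC_NBINS) are slab-backed, larger ones own a whole extent, and a sampled small allocation may still have !slab. A hedged sketch of that classification; the 14 KiB cutoff below is a stand-in, since jemalloc computes SC_SMALL_MAXCLASS from its size-class tables:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in cutoff; not jemalloc's computed SC_SMALL_MAXCLASS. */
    #define TOY_SMALL_MAXCLASS (14 * 1024)

    /* Small allocations are packed many to a slab; anything bigger is
     * a standalone large extent, so its slab bit is false. */
    static bool toy_slab_backed(size_t usize)
    {
        return usize <= TOY_SMALL_MAXCLASS;
    }

    int main(void)
    {
        size_t sizes[] = { 8, 4096, 14 * 1024, 14 * 1024 + 1, 1 << 20 };
        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%zu -> slab=%d\n", sizes[i], toy_slab_backed(sizes[i]));
        return 0;
    }
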
/freebsd/contrib/jemalloc/include/jemalloc/internal/
arena_inlines_b.h
56 } else if (unlikely(!(is_slab = alloc_ctx->slab))) {
88 if (unlikely(!alloc_ctx->slab)) { in arena_prof_alloc_time_get()
204 /* Only slab members should be looked up via interior pointers. */ in arena_vsalloc()
269 assert(alloc_ctx.slab == edata_slab_get(edata));
272 if (likely(alloc_ctx.slab)) { in arena_dalloc_large()
326 assert(alloc_ctx.slab == edata_slab_get(edata));
329 if (likely(alloc_ctx.slab)) { in arena_sdalloc_no_tcache()
348 * object, so base szind and slab on the given size. in arena_sdalloc_no_tcache()
351 alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS); in arena_sdalloc_no_tcache()
360 || alloc_ctx.slab in arena_sdalloc_no_tcache()
249 bool slab; arena_dalloc_no_tcache() local
297 bool slab; arena_dalloc() local
333 bool slab; arena_sdalloc_no_tcache() local
383 bool slab; arena_sdalloc() local
[all...]
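
The arena_inlines_b.h matches show the deallocation fast path branching once on alloc_ctx.slab (after asserting it agrees with edata_slab_get(edata)): slab-backed regions go back to their bin, large allocations are freed as whole extents. A toy dispatch with stub backends and invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_ctx { bool slab; };

    /* Stub backends; the real paths push the region onto the slab's
     * freelist or return the whole extent, respectively. */
    static void bin_free_small(void *ptr)    { printf("small free %p\n", ptr); }
    static void extent_free_large(void *ptr) { printf("large free %p\n", ptr); }

    /* One branch on the cached slab bit picks the free path.  A sketch
     * of the dispatch, not jemalloc's code. */
    static void toy_dalloc(const struct toy_ctx *ctx, void *ptr)
    {
        if (ctx->slab)
            bin_free_small(ptr);
        else
            extent_free_large(ptr);
    }

    int main(void)
    {
        int x;
        struct toy_ctx small_ctx = { true }, large_ctx = { false };
        toy_dalloc(&small_ctx, &x);
        toy_dalloc(&large_ctx, &x);
        return 0;
    }
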
rtree.h
48 bool slab;
66 * memory address bits, the index, edata, and slab fields are packed as
73 * b: slab
81 * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
193 uintptr_t slab_bits = (uintptr_t)contents.metadata.slab; in rtree_leaf_elm_bits_extent_get()
209 contents.metadata.slab = (bool)(bits & 1);
249 contents.metadata.slab = (bool)(metadata_bits & 1); in rtree_leaf_elm_extent_write()
272 *additional = (unsigned)contents.metadata.slab in rtree_leaf_elm_szind_write()
535 contents.metadata.slab = false;
547 contents.metadata.slab
281 rtree_leaf_elm_slab_write(tsdn_t * tsdn,rtree_t * rtree,rtree_leaf_elm_t * elm,bool slab) rtree_leaf_elm_slab_write() argument
296 rtree_leaf_elm_write(tsdn_t * tsdn,rtree_t * rtree,rtree_leaf_elm_t * elm,extent_t * extent,szind_t szind,bool slab) rtree_leaf_elm_write() argument
315 rtree_leaf_elm_szind_slab_update(tsdn_t * tsdn,rtree_t * rtree,rtree_leaf_elm_t * elm,szind_t szind,bool slab) rtree_leaf_elm_szind_slab_update() argument
387 rtree_write(tsdn_t * tsdn,rtree_t * rtree,rtree_ctx_t * rtree_ctx,uintptr_t key,extent_t * extent,szind_t szind,bool slab) rtree_write() argument
512 rtree_szind_slab_update(tsdn_t * tsdn,rtree_t * rtree,rtree_ctx_t * rtree_ctx,uintptr_t key,szind_t szind,bool slab) rtree_szind_slab_update() argument
[all...]
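
The rtree.h comments above describe how each leaf element packs the edata pointer together with its metadata: szind in the top 8 bits, then state, is_head, and the slab flag in the lowest bit. A standalone sketch of the same packing trick, modeling only szind and the slab bit; it assumes a zero top address byte (x86-64 canonical user pointers) and enough alignment to free bit 0, and it is not jemalloc's exact layout:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t packed_elm;

    static packed_elm pack(void *edata, unsigned szind, bool slab)
    {
        uintptr_t p = (uintptr_t)edata;
        /* Top byte and bit 0 must be free for metadata. */
        assert((p >> 56) == 0 && (p & 1) == 0 && szind < 256);
        return ((uintptr_t)szind << 56) | p | (slab ? 1 : 0);
    }

    /* Strip the top metadata byte and the slab bit to recover the pointer. */
    static void *unpack_edata(packed_elm e) { return (void *)((e << 8 >> 8) & ~(uintptr_t)1); }
    static unsigned unpack_szind(packed_elm e) { return (unsigned)(e >> 56); }
    static bool unpack_slab(packed_elm e) { return (bool)(e & 1); }

    int main(void)
    {
        static int dummy;  /* stand-in for an edata_t */
        packed_elm e = pack(&dummy, 7, true);
        printf("edata=%p szind=%u slab=%d\n",
            unpack_edata(e), unpack_szind(e), unpack_slab(e));
        return 0;
    }
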
extent_inlines.h
extent_externs.h
arena_structs_b.h
arena_externs.h
74 void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
77 edata_t *slab, bin_t *bin);
79 edata_t *slab, bin_t *bin);
jemalloc_internal_defs.h.in
194 /* Maximum number of regions in a slab. */
/freebsd/contrib/unbound/util/storage/
slabhash.c
243 size_t slab, cnt = 0; in count_slabhash_entries() local
245 for(slab=0; slab<sh->size; slab++) { in count_slabhash_entries()
246 lock_quick_lock(&sh->array[slab]->lock); in count_slabhash_entries()
247 cnt += sh->array[slab]->num; in count_slabhash_entries()
248 lock_quick_unlock(&sh->array[slab]->lock); in count_slabhash_entries()
255 size_t slab, cnt = 0, max_collisions = 0; in get_slabhash_stats() local
257 for(slab=0; slab<sh->size; slab++) { in get_slabhash_stats()
[all...]
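
count_slabhash_entries() above walks every slab (shard) of the hash and takes that slab's lock just long enough to read its entry count; giving each shard its own lock is what lets many threads use the cache concurrently. A self-contained sketch of the per-shard-lock walk using POSIX threads (invented types; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NSLAB 4

    /* Toy sharded counter: one lock per slab, like unbound's slabhash
     * keeps one independently locked lruhash per slab. */
    struct toy_slab { pthread_mutex_t lock; size_t num; };

    static size_t count_entries(struct toy_slab *slabs, size_t nslab)
    {
        size_t cnt = 0;
        for (size_t slab = 0; slab < nslab; slab++) {
            pthread_mutex_lock(&slabs[slab].lock);   /* lock one shard */
            cnt += slabs[slab].num;
            pthread_mutex_unlock(&slabs[slab].lock); /* release before next */
        }
        return cnt;
    }

    int main(void)
    {
        struct toy_slab slabs[NSLAB];
        for (int i = 0; i < NSLAB; i++) {
            pthread_mutex_init(&slabs[i].lock, NULL);
            slabs[i].num = (size_t)i * 10;
        }
        printf("total=%zu\n", count_entries(slabs, NSLAB));
        return 0;
    }
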
/freebsd/sys/vm/
uma_int.h
404 slab_tohashslab(uma_slab_t slab) in slab_tohashslab() argument
407 return (__containerof(slab, struct uma_hash_slab, uhs_slab)); in slab_tohashslab()
411 slab_data(uma_slab_t slab, uma_keg_t keg) in slab_data() argument
415 return ((void *)((uintptr_t)slab - keg->uk_pgoff)); in slab_data()
417 return (slab_tohashslab(slab)->uhs_data); in slab_data()
421 slab_item(uma_slab_t slab, uma_keg_t keg, int index) in slab_item() argument
425 data = (uintptr_t)slab_data(slab, keg); in slab_item()
430 slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item) in slab_item_index() argument
434 data = (uintptr_t)slab_data(slab, keg); in slab_item_index()
606 uma_hash_slab_t slab; in hash_sfind() local
[all …]
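
slab_item() and slab_item_index() above convert between an item's index and its address with plain arithmetic over the slab's data area: address = data + index * rsize, and index = (address - data) / rsize. The same arithmetic as a standalone sketch (invented names):

    #include <assert.h>
    #include <stdio.h>

    /* Toy keg: all slabs in a keg share one item size. */
    struct toy_keg { size_t rsize; };

    static void *toy_slab_item(char *data, const struct toy_keg *keg,
        size_t index)
    {
        return data + index * keg->rsize;       /* index -> address */
    }

    static size_t toy_slab_item_index(char *data, const struct toy_keg *keg,
        void *item)
    {
        size_t off = (size_t)((char *)item - data);
        assert(off % keg->rsize == 0);          /* must be a region start */
        return off / keg->rsize;                /* address -> index */
    }

    int main(void)
    {
        static char data[16 * 64];
        struct toy_keg keg = { 64 };
        void *it = toy_slab_item(data, &keg, 5);
        printf("index=%zu\n", toy_slab_item_index(data, &keg, it)); /* 5 */
        return 0;
    }
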
uma_core.c
323 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
324 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
348 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
352 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
353 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
1285 uma_hash_slab_t slab; in hash_expand() local
1302 slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]); in hash_expand()
1303 LIST_REMOVE(slab, uhs_hlink); in hash_expand()
1304 hval = UMA_HASH(newhash, slab->uhs_data); in hash_expand()
1306 slab, uhs_hlink); in hash_expand()
[all …]
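
hash_expand() above grows UMA's slab hash by walking each old bucket, unlinking every uma_hash_slab, rehashing its uhs_data pointer, and relinking it into the new table; slabs are never copied, only list pointers move. A standalone sketch of that relink-by-rehash loop over a minimal chained hash (invented names):

    #include <stdint.h>
    #include <stdio.h>

    struct node { void *data; struct node *next; };

    #define OLD_BUCKETS 2
    #define NEW_BUCKETS 8

    static size_t toy_hash(void *p, size_t nbuckets)
    {
        return ((uintptr_t)p >> 4) % nbuckets;  /* crude pointer hash */
    }

    /* Move every node from the old table to the new one; nodes are
     * relinked, never copied, mirroring hash_expand(). */
    static void toy_expand(struct node **oldh, size_t oldn,
        struct node **newh, size_t newn)
    {
        for (size_t i = 0; i < oldn; i++) {
            while (oldh[i] != NULL) {
                struct node *n = oldh[i];
                oldh[i] = n->next;                 /* unlink from old bucket */
                size_t hval = toy_hash(n->data, newn);
                n->next = newh[hval];              /* relink into new bucket */
                newh[hval] = n;
            }
        }
    }

    int main(void)
    {
        struct node nodes[4];
        struct node *oldh[OLD_BUCKETS] = { NULL }, *newh[NEW_BUCKETS] = { NULL };
        for (int i = 0; i < 4; i++) {
            nodes[i].data = &nodes[i];
            size_t h = toy_hash(nodes[i].data, OLD_BUCKETS);
            nodes[i].next = oldh[h];
            oldh[h] = &nodes[i];
        }
        toy_expand(oldh, OLD_BUCKETS, newh, NEW_BUCKETS);
        for (size_t i = 0; i < NEW_BUCKETS; i++)
            for (struct node *n = newh[i]; n != NULL; n = n->next)
                printf("bucket %zu: %p\n", i, n->data);
        return 0;
    }
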
vm_page.h
228 void *slab; member
/freebsd/contrib/unbound/validator/
val_kcache.c
62 kcache->slab = slabhash_create(numtables, start_size, maxmem, in key_cache_create()
65 if(!kcache->slab) { in key_cache_create()
78 slabhash_delete(kcache->slab); in key_cache_delete()
90 slabhash_insert(kcache->slab, k->entry.hash, &k->entry, in key_cache_insert()
116 e = slabhash_lookup(kcache->slab, lookfor.entry.hash, &lookfor, wr); in key_cache_search()
154 return sizeof(*kcache) + slabhash_get_mem(kcache->slab); in key_cache_get_mem()
166 slabhash_remove(kcache->slab, lookfor.entry.hash, &lookfor); in key_cache_remove()
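
val_kcache.c shows that the validator's key cache is a thin wrapper over a slabhash: create, insert, search, remove, and memory accounting all delegate to the embedded slab member declared in val_kcache.h below. A hedged standalone sketch of that delegation pattern, with a stub standing in for the slabhash:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for unbound's slabhash; only memory accounting here. */
    struct toy_slabhash { size_t mem; };

    /* Mirrors struct key_cache, which holds just "struct slabhash* slab". */
    struct toy_key_cache { struct toy_slabhash *slab; };

    static struct toy_key_cache *toy_key_cache_create(void)
    {
        struct toy_key_cache *kc = calloc(1, sizeof(*kc));
        if (kc == NULL)
            return NULL;
        kc->slab = calloc(1, sizeof(*kc->slab)); /* like slabhash_create() */
        if (kc->slab == NULL) {
            free(kc);
            return NULL;
        }
        kc->slab->mem = sizeof(*kc->slab);
        return kc;
    }

    /* Like key_cache_get_mem(): the wrapper's own size plus the hash's. */
    static size_t toy_key_cache_get_mem(const struct toy_key_cache *kc)
    {
        return sizeof(*kc) + kc->slab->mem;
    }

    static void toy_key_cache_delete(struct toy_key_cache *kc)
    {
        if (kc == NULL)
            return;
        free(kc->slab);  /* key_cache_delete() calls slabhash_delete() */
        free(kc);
    }

    int main(void)
    {
        struct toy_key_cache *kc = toy_key_cache_create();
        if (kc == NULL)
            return 1;
        printf("mem=%zu\n", toy_key_cache_get_mem(kc));
        toy_key_cache_delete(kc);
        return 0;
    }
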
val_kcache.h
56 struct slabhash* slab; member
/freebsd/sys/kern/
kern_malloc.c
469 contigmalloc_size(uma_slab_t slab) in contigmalloc_size() argument
473 KASSERT(IS_CONTIG_MALLOC(slab), in contigmalloc_size()
474 ("%s: called on non-contigmalloc allocation: %p", __func__, slab)); in contigmalloc_size()
475 va = (uintptr_t)slab; in contigmalloc_size()
603 malloc_large_size(uma_slab_t slab) in malloc_large_size() argument
607 va = (uintptr_t)slab; in malloc_large_size()
608 KASSERT(IS_MALLOC_LARGE(slab), in malloc_large_size()
609 ("%s: called on non-malloc_large allocation: %p", __func__, slab)); in malloc_large_size()
932 uma_slab_t slab; in _free() local
943 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab); in _free()
[all …]
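
_free() above finds the zone and slab for an arbitrary pointer by masking off the low address bits ((vm_offset_t)addr & ~UMA_SLAB_MASK) to reach the start of the naturally aligned backing run, where the bookkeeping lives; contigmalloc_size() and malloc_large_size() then read the size stashed there. A standalone sketch of the mask-to-header trick, assuming an invented layout with a header at the start of each 4 KiB aligned slab:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_SLAB_SIZE 4096u
    #define TOY_SLAB_MASK (TOY_SLAB_SIZE - 1)

    /* Header stored at the start of each naturally aligned slab. */
    struct toy_slab_hdr { size_t item_size; };

    static struct toy_slab_hdr *toy_hdr_of(void *item)
    {
        /* Round the item's address down to the slab boundary. */
        return (struct toy_slab_hdr *)
            ((uintptr_t)item & ~(uintptr_t)TOY_SLAB_MASK);
    }

    int main(void)
    {
        void *slab = NULL;
        /* Naturally aligned backing memory, so the mask trick works. */
        if (posix_memalign(&slab, TOY_SLAB_SIZE, TOY_SLAB_SIZE) != 0)
            return 1;
        struct toy_slab_hdr *hdr = slab;
        hdr->item_size = 128;

        void *item = (char *)slab + 5 * 128; /* some item inside the slab */
        printf("item_size=%zu\n", toy_hdr_of(item)->item_size);
        free(slab);
        return 0;
    }
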
/freebsd/contrib/bc/src/
vector.c
541 bc_slab_free(void* slab) in bc_slab_free() argument
543 free(((BcSlab*) slab)->s); in bc_slab_free()
549 BcSlab* slab; in bc_slabvec_init() local
556 slab = bc_vec_pushEmpty(v); in bc_slabvec_init()
557 bc_slab_init(slab); in bc_slabvec_init()
565 BcSlab slab; in bc_slabvec_strdup() local
580 slab.len = SIZE_MAX; in bc_slabvec_strdup()
581 slab.s = bc_vm_strdup(str); in bc_slabvec_strdup()
584 bc_vec_pushAt(v, &slab, v->len - 1); in bc_slabvec_strdup()
586 return slab.s; in bc_slabvec_strdup()
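
bc's slab vectors copy short strings into shared slab pages; bc_slabvec_strdup() above shows the escape hatch, where a string gets its own buffer and len is set to SIZE_MAX so bc_slab_free() knows to free the slab's s pointer. A standalone sketch of the bump-allocation half (invented sizes and names):

    #include <stdio.h>
    #include <string.h>

    #define TOY_SLAB_SIZE 128

    /* Toy slab: strings are bump-allocated into buf until it fills. */
    struct toy_slab {
        char   buf[TOY_SLAB_SIZE];
        size_t len;   /* bytes used so far */
    };

    /* Copy str into the slab if it fits; NULL means "start a new slab"
     * (or, as in bc, give an oversized string its own buffer). */
    static char *toy_slab_strdup(struct toy_slab *slab, const char *str)
    {
        size_t need = strlen(str) + 1;
        if (slab->len + need > TOY_SLAB_SIZE)
            return NULL;
        char *s = memcpy(slab->buf + slab->len, str, need);
        slab->len += need;
        return s;
    }

    int main(void)
    {
        struct toy_slab slab = { .len = 0 };
        const char *a = toy_slab_strdup(&slab, "scale");
        const char *b = toy_slab_strdup(&slab, "ibase");
        printf("a=%s b=%s used=%zu\n", a, b, slab.len);
        return 0;
    }
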
/freebsd/contrib/netbsd-tests/sys/uvm/
t_uvm_physseg.c
474 struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1 in ATF_TC_BODY() local
495 uvm_page_init_fake(slab, npages1 + npages2 + npages3); in ATF_TC_BODY()
514 ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3)); in ATF_TC_BODY()
520 ATF_REQUIRE(pgs < slab || pgs > (slab + npages1 in ATF_TC_BODY()
541 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2 + npages3)); in ATF_TC_BODY() local
577 uvm_page_init_fake(slab, npages1 + npages2 + npages3); in ATF_TC_BODY()
681 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2)); in ATF_TC_BODY() local
698 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
775 struct vm_page *slab, *pgs; in ATF_TC_BODY() local
781 slab = malloc(sizeof(struct vm_page) * npages * 2); in ATF_TC_BODY()
[all …]
t_uvm_physseg_load.c
543 struct vm_page *slab = malloc(sizeof(struct vm_page) * in ATF_TC_BODY() local
553 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
592 struct vm_page *slab = malloc(sizeof(struct vm_page) * in ATF_TC_BODY() local
602 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
641 struct vm_page *slab = malloc(sizeof(struct vm_page) in ATF_TC_BODY() local
651 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
690 struct vm_page *slab = malloc(sizeof(struct vm_page) * (npages1 + npages2)); in ATF_TC_BODY() local
699 uvm_page_init_fake(slab, npages1 + npages2); in ATF_TC_BODY()
/freebsd/tools/test/stress2/misc/
uma_zalloc_arg.sh
145 @@ -292,4 +294,143 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
146 BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
293 @@ -427,6 +427,9 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
/freebsd/contrib/unbound/daemon/
cachedump.c
244 size_t slab; in dump_slabhash() local
245 for(slab=0; slab<sh->size; slab++) { in dump_slabhash()
246 if(!dump_lruhash(sh->array[slab], func, ssl, arg)) in dump_slabhash()
stats.c
309 s->svr.key_cache_count = (long long)count_slabhash_entries(worker->env.key_cache->slab); in server_stats_compile()
/freebsd/contrib/bc/include/
vector.h
409 bc_slab_free(void* slab);
