Lines Matching refs:cp

Each entry below is one source line referencing the symbol cp (a kmem_cache_t pointer in nearly every hit): the line number, the matching text, the enclosing function, and, for definitions, whether cp is a local or an argument there.

1222 kmem_cache_t *cp; in kmem_cache_applyall() local
1225 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_cache_applyall()
1226 cp = list_next(&kmem_caches, cp)) in kmem_cache_applyall()
1228 (void) taskq_dispatch(tq, (task_func_t *)func, cp, in kmem_cache_applyall()
1231 func(cp); in kmem_cache_applyall()
1238 kmem_cache_t *cp; in kmem_cache_applyall_id() local
1241 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_cache_applyall_id()
1242 cp = list_next(&kmem_caches, cp)) { in kmem_cache_applyall_id()
1243 if (!(cp->cache_cflags & KMC_IDENTIFIER)) in kmem_cache_applyall_id()
1246 (void) taskq_dispatch(tq, (task_func_t *)func, cp, in kmem_cache_applyall_id()
1249 func(cp); in kmem_cache_applyall_id()
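
The two applyall routines above share one shape: walk the global kmem_caches list and either hand each cache to a taskq worker or call the function inline (applyall_id additionally skips caches without KMC_IDENTIFIER). A minimal user-space sketch of that dispatch shape, with a singly linked list and function pointers standing in for the kernel's list_t and taskq (all names here are hypothetical):

#include <stdio.h>
#include <stddef.h>

struct cache {                     /* stand-in for kmem_cache_t */
	const char   *name;
	struct cache *next;
};

typedef void (*cache_func_t)(struct cache *);

/*
 * Apply func to every cache on the list; when dispatch is non-NULL it
 * models taskq_dispatch() and runs the call asynchronously, otherwise
 * the call is made inline, as in kmem_cache_applyall() above.
 */
static void
cache_applyall(struct cache *head, cache_func_t func,
    void (*dispatch)(cache_func_t, struct cache *))
{
	struct cache *cp;

	for (cp = head; cp != NULL; cp = cp->next) {
		if (dispatch != NULL)
			dispatch(func, cp);
		else
			func(cp);
	}
}

static void
print_cache(struct cache *cp)
{
	printf("%s\n", cp->name);
}

int
main(void)
{
	struct cache b = { "kmem_alloc_16", NULL };
	struct cache a = { "kmem_alloc_8", &b };

	cache_applyall(&a, print_cache, NULL);
	return (0);
}
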
1258 kmem_findslab(kmem_cache_t *cp, void *buf) in kmem_findslab() argument
1262 mutex_enter(&cp->cache_lock); in kmem_findslab()
1263 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL; in kmem_findslab()
1264 sp = list_next(&cp->cache_complete_slabs, sp)) { in kmem_findslab()
1266 mutex_exit(&cp->cache_lock); in kmem_findslab()
1270 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL; in kmem_findslab()
1271 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) { in kmem_findslab()
1273 mutex_exit(&cp->cache_lock); in kmem_findslab()
1277 mutex_exit(&cp->cache_lock); in kmem_findslab()
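
kmem_findslab() above does a two-phase search under cache_lock: first the list of complete slabs, then the AVL tree of partial slabs, testing each slab for address containment (the KMEM_SLAB_MEMBER idea). A sketch of that lookup over plain arrays; the struct is a simplified stand-in, not the kernel's layout:

#include <stdint.h>
#include <stddef.h>

struct slab {                      /* simplified stand-in for kmem_slab_t */
	uintptr_t slab_base;       /* first byte the slab covers */
	size_t    slab_size;       /* bytes covered */
};

/* Does buf fall inside sp's address range? */
static int
slab_contains(const struct slab *sp, const void *buf)
{
	uintptr_t p = (uintptr_t)buf;

	return (p >= sp->slab_base && p - sp->slab_base < sp->slab_size);
}

/*
 * Two-phase lookup: scan the "complete" set first, then the "partial"
 * set, returning the containing slab or NULL, as kmem_findslab() does
 * with its list and AVL walks.
 */
static struct slab *
find_slab(struct slab *complete, size_t nc, struct slab *partial,
    size_t np, const void *buf)
{
	size_t i;

	for (i = 0; i < nc; i++)
		if (slab_contains(&complete[i], buf))
			return (&complete[i]);
	for (i = 0; i < np; i++)
		if (slab_contains(&partial[i], buf))
			return (&partial[i]);
	return (NULL);
}

int
main(void)
{
	struct slab complete[1] = { { 0x1000, 0x1000 } };
	struct slab partial[1] = { { 0x8000, 0x1000 } };
	void *buf = (void *)(uintptr_t)0x8100;

	return (find_slab(complete, 1, partial, 1, buf) == &partial[0] ?
	    0 : 1);
}
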
1287 kmem_cache_t *cp = cparg; in kmem_error() local
1296 sp = kmem_findslab(cp, buf); in kmem_error()
1298 for (cp = list_tail(&kmem_caches); cp != NULL; in kmem_error()
1299 cp = list_prev(&kmem_caches, cp)) { in kmem_error()
1300 if ((sp = kmem_findslab(cp, buf)) != NULL) in kmem_error()
1306 cp = NULL; in kmem_error()
1309 if (cp != cparg) in kmem_error()
1313 (uintptr_t)sp->slab_base) % cp->cache_chunksize; in kmem_error()
1316 if (cp->cache_flags & KMF_BUFTAG) in kmem_error()
1317 btp = KMEM_BUFTAG(cp, buf); in kmem_error()
1318 if (cp->cache_flags & KMF_HASH) { in kmem_error()
1319 mutex_enter(&cp->cache_lock); in kmem_error()
1320 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) in kmem_error()
1323 mutex_exit(&cp->cache_lock); in kmem_error()
1326 if (kmem_findslab(cp->cache_bufctl_cache, bcp) == in kmem_error()
1339 kmem_panic_info.kmp_realcache = cp; in kmem_error()
1349 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_error()
1383 printf("buffer was allocated from %s,\n", cp->cache_name); in kmem_error()
1402 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) && in kmem_error()
1412 (void *)sp, cp->cache_name); in kmem_error()
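
When kmem_error() above cannot find buf in the expected cache, it walks kmem_caches from the tail (list_tail/list_prev) until some cache's slab contains the address, then reports how far the pointer sits from a valid chunk boundary, the (buf - slab_base) % cache_chunksize computation at line 1313. A sketch of that offset arithmetic with illustrative values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t slab_base = 0x10000;   /* sp->slab_base */
	uintptr_t buf = 0x10238;         /* pointer being diagnosed */
	size_t chunksize = 0x100;        /* cp->cache_chunksize */
	size_t off = (buf - slab_base) % chunksize;

	/* A nonzero offset means buf is not a valid buffer address. */
	printf("0x%zx into its chunk%s\n", off,
	    off == 0 ? "" : " (misaligned/corrupt pointer)");
	return (0);
}
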
1500 #define KMEM_AUDIT(lp, cp, bcp) \ argument
1510 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp, in kmem_log_event() argument
1518 bca.bc_cache = cp; in kmem_log_event()
1519 KMEM_AUDIT(lp, cp, &bca); in kmem_log_event()
1526 kmem_slab_create(kmem_cache_t *cp, int kmflag) in kmem_slab_create() argument
1528 size_t slabsize = cp->cache_slabsize; in kmem_slab_create()
1529 size_t chunksize = cp->cache_chunksize; in kmem_slab_create()
1530 int cache_flags = cp->cache_flags; in kmem_slab_create()
1535 vmem_t *vmp = cp->cache_arena; in kmem_slab_create()
1537 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_slab_create()
1539 color = cp->cache_color + cp->cache_align; in kmem_slab_create()
1540 if (color > cp->cache_maxcolor) in kmem_slab_create()
1541 color = cp->cache_mincolor; in kmem_slab_create()
1542 cp->cache_color = color; in kmem_slab_create()
1557 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH)); in kmem_slab_create()
1558 if (!(cp->cache_cflags & KMC_NOTOUCH)) in kmem_slab_create()
1566 sp = KMEM_SLAB(cp, slab); in kmem_slab_create()
1570 sp->slab_cache = cp; in kmem_slab_create()
1582 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag); in kmem_slab_create()
1589 bcap->bc_cache = cp; in kmem_slab_create()
1594 bcp = KMEM_BUFCTL(cp, buf); in kmem_slab_create()
1597 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_slab_create()
1603 cp->cache_verify); in kmem_slab_create()
1611 kmem_log_event(kmem_slab_log, cp, sp, slab); in kmem_slab_create()
1619 kmem_cache_free(cp->cache_bufctl_cache, bcp); in kmem_slab_create()
1629 kmem_log_event(kmem_failure_log, cp, NULL, NULL); in kmem_slab_create()
1630 atomic_inc_64(&cp->cache_alloc_fail); in kmem_slab_create()
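
Slab coloring is visible at the top of kmem_slab_create() above: each new slab's first buffer is offset ("colored") a little differently so that buffers from successive slabs spread across cache lines. The color advances by the alignment and wraps from maxcolor back to mincolor. A sketch of that rotation (the struct is a stand-in; field names mirror the fragments):

#include <stdio.h>
#include <stddef.h>

struct cache {
	size_t cache_color;
	size_t cache_align;
	size_t cache_mincolor;
	size_t cache_maxcolor;
};

/* One coloring step per new slab, as in kmem_slab_create() above. */
static size_t
next_color(struct cache *cp)
{
	size_t color = cp->cache_color + cp->cache_align;

	if (color > cp->cache_maxcolor)
		color = cp->cache_mincolor;
	cp->cache_color = color;
	return (color);
}

int
main(void)
{
	struct cache c = { 0, 64, 0, 192 };
	int i;

	for (i = 0; i < 6; i++)
		printf("slab %d starts at color %zu\n", i, next_color(&c));
	return (0);
}
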
1639 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp) in kmem_slab_destroy() argument
1641 vmem_t *vmp = cp->cache_arena; in kmem_slab_destroy()
1644 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_slab_destroy()
1647 if (cp->cache_flags & KMF_HASH) { in kmem_slab_destroy()
1651 kmem_cache_free(cp->cache_bufctl_cache, bcp); in kmem_slab_destroy()
1655 vmem_free(vmp, slab, cp->cache_slabsize); in kmem_slab_destroy()
1659 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill) in kmem_slab_alloc_impl() argument
1665 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_alloc_impl()
1672 (sp == avl_first(&cp->cache_partial_slabs)))); in kmem_slab_alloc_impl()
1673 ASSERT(sp->slab_cache == cp); in kmem_slab_alloc_impl()
1675 cp->cache_slab_alloc++; in kmem_slab_alloc_impl()
1676 cp->cache_bufslab--; in kmem_slab_alloc_impl()
1682 if (cp->cache_flags & KMF_HASH) { in kmem_slab_alloc_impl()
1687 hash_bucket = KMEM_HASH(cp, buf); in kmem_slab_alloc_impl()
1690 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { in kmem_slab_alloc_impl()
1691 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_slab_alloc_impl()
1694 buf = KMEM_BUF(cp, bcp); in kmem_slab_alloc_impl()
1705 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_alloc_impl()
1710 list_insert_head(&cp->cache_complete_slabs, sp); in kmem_slab_alloc_impl()
1711 cp->cache_complete_slab_count++; in kmem_slab_alloc_impl()
1721 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) && in kmem_slab_alloc_impl()
1722 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) { in kmem_slab_alloc_impl()
1723 kmem_slab_prefill(cp, sp); in kmem_slab_alloc_impl()
1728 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_alloc_impl()
1736 ASSERT(!avl_update(&cp->cache_partial_slabs, sp)); in kmem_slab_alloc_impl()
1744 kmem_slab_alloc(kmem_cache_t *cp, int kmflag) in kmem_slab_alloc() argument
1750 mutex_enter(&cp->cache_lock); in kmem_slab_alloc()
1751 test_destructor = (cp->cache_slab_alloc == 0); in kmem_slab_alloc()
1752 sp = avl_first(&cp->cache_partial_slabs); in kmem_slab_alloc()
1754 ASSERT(cp->cache_bufslab == 0); in kmem_slab_alloc()
1759 mutex_exit(&cp->cache_lock); in kmem_slab_alloc()
1760 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) { in kmem_slab_alloc()
1763 mutex_enter(&cp->cache_lock); in kmem_slab_alloc()
1764 cp->cache_slab_create++; in kmem_slab_alloc()
1765 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) in kmem_slab_alloc()
1766 cp->cache_bufmax = cp->cache_buftotal; in kmem_slab_alloc()
1767 cp->cache_bufslab += sp->slab_chunks; in kmem_slab_alloc()
1770 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE); in kmem_slab_alloc()
1771 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == in kmem_slab_alloc()
1772 (cp->cache_complete_slab_count + in kmem_slab_alloc()
1773 avl_numnodes(&cp->cache_partial_slabs) + in kmem_slab_alloc()
1774 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); in kmem_slab_alloc()
1775 mutex_exit(&cp->cache_lock); in kmem_slab_alloc()
1777 if (test_destructor && cp->cache_destructor != NULL) { in kmem_slab_alloc()
1783 if ((cp->cache_constructor == NULL) || in kmem_slab_alloc()
1784 cp->cache_constructor(buf, cp->cache_private, in kmem_slab_alloc()
1786 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_alloc()
1789 cp->cache_bufsize); in kmem_slab_alloc()
1790 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_slab_alloc()
1791 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_slab_alloc()
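
The ASSERT near the end of kmem_slab_alloc() above (repeated in kmem_slab_free() below) encodes the slab population invariant: every slab created and not yet destroyed sits on exactly one of the complete list, the partial AVL tree, or the defrag deadlist. Expressed as a standalone check:

#include <stdint.h>

/*
 * cache_slab_create - cache_slab_destroy ==
 *     cache_complete_slab_count + avl_numnodes(partial tree) +
 *     kmd_deadcount
 */
static int
slab_counts_consistent(uint64_t created, uint64_t destroyed,
    uint64_t complete, uint64_t partial, uint64_t dead)
{
	return (created - destroyed == complete + partial + dead);
}
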
1804 kmem_slab_free(kmem_cache_t *cp, void *buf) in kmem_slab_free() argument
1811 mutex_enter(&cp->cache_lock); in kmem_slab_free()
1812 cp->cache_slab_free++; in kmem_slab_free()
1814 if (cp->cache_flags & KMF_HASH) { in kmem_slab_free()
1818 prev_bcpp = KMEM_HASH(cp, buf); in kmem_slab_free()
1825 cp->cache_lookup_depth++; in kmem_slab_free()
1829 bcp = KMEM_BUFCTL(cp, buf); in kmem_slab_free()
1830 sp = KMEM_SLAB(cp, buf); in kmem_slab_free()
1833 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) { in kmem_slab_free()
1834 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1835 kmem_error(KMERR_BADADDR, cp, buf); in kmem_slab_free()
1847 kmem_slab_move_yes(cp, sp, buf); in kmem_slab_free()
1850 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { in kmem_slab_free()
1851 if (cp->cache_flags & KMF_CONTENTS) in kmem_slab_free()
1854 cp->cache_contents); in kmem_slab_free()
1855 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_slab_free()
1861 cp->cache_bufslab++; in kmem_slab_free()
1870 list_remove(&cp->cache_complete_slabs, sp); in kmem_slab_free()
1871 cp->cache_complete_slab_count--; in kmem_slab_free()
1873 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1876 cp->cache_buftotal -= sp->slab_chunks; in kmem_slab_free()
1877 cp->cache_bufslab -= sp->slab_chunks; in kmem_slab_free()
1889 if (cp->cache_defrag == NULL || in kmem_slab_free()
1890 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) && in kmem_slab_free()
1892 cp->cache_slab_destroy++; in kmem_slab_free()
1893 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1894 kmem_slab_destroy(cp, sp); in kmem_slab_free()
1896 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_slab_free()
1910 cp->cache_defrag->kmd_deadcount++; in kmem_slab_free()
1911 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1920 list_remove(&cp->cache_complete_slabs, sp); in kmem_slab_free()
1921 cp->cache_complete_slab_count--; in kmem_slab_free()
1922 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1925 if (avl_update_gt(&cp->cache_partial_slabs, sp)) { in kmem_slab_free()
1931 (void) avl_update_gt(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1935 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == in kmem_slab_free()
1936 (cp->cache_complete_slab_count + in kmem_slab_free()
1937 avl_numnodes(&cp->cache_partial_slabs) + in kmem_slab_free()
1938 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); in kmem_slab_free()
1939 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1946 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct, in kmem_cache_alloc_debug() argument
1949 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_cache_alloc_debug()
1954 kmem_error(KMERR_BADBUFTAG, cp, buf); in kmem_cache_alloc_debug()
1960 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { in kmem_cache_alloc_debug()
1961 kmem_error(KMERR_BADBUFCTL, cp, buf); in kmem_cache_alloc_debug()
1965 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_alloc_debug()
1966 if (!construct && (cp->cache_flags & KMF_LITE)) { in kmem_cache_alloc_debug()
1968 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_cache_alloc_debug()
1971 if (cp->cache_constructor != NULL) in kmem_cache_alloc_debug()
1979 cp->cache_verify)) { in kmem_cache_alloc_debug()
1980 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_cache_alloc_debug()
1987 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 && in kmem_cache_alloc_debug()
1990 kmem_log_event(kmem_failure_log, cp, NULL, NULL); in kmem_cache_alloc_debug()
1991 if (!construct && cp->cache_destructor != NULL) in kmem_cache_alloc_debug()
1992 cp->cache_destructor(buf, cp->cache_private); in kmem_cache_alloc_debug()
1997 if (mtbf || (construct && cp->cache_constructor != NULL && in kmem_cache_alloc_debug()
1998 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) { in kmem_cache_alloc_debug()
1999 atomic_inc_64(&cp->cache_alloc_fail); in kmem_cache_alloc_debug()
2001 if (cp->cache_flags & KMF_DEADBEEF) in kmem_cache_alloc_debug()
2002 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_cache_alloc_debug()
2003 kmem_slab_free(cp, buf); in kmem_cache_alloc_debug()
2007 if (cp->cache_flags & KMF_AUDIT) { in kmem_cache_alloc_debug()
2008 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_cache_alloc_debug()
2011 if ((cp->cache_flags & KMF_LITE) && in kmem_cache_alloc_debug()
2012 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { in kmem_cache_alloc_debug()
2020 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller) in kmem_cache_free_debug() argument
2022 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_cache_free_debug()
2028 kmem_error(KMERR_DUPFREE, cp, buf); in kmem_cache_free_debug()
2031 sp = kmem_findslab(cp, buf); in kmem_cache_free_debug()
2032 if (sp == NULL || sp->slab_cache != cp) in kmem_cache_free_debug()
2033 kmem_error(KMERR_BADADDR, cp, buf); in kmem_cache_free_debug()
2035 kmem_error(KMERR_REDZONE, cp, buf); in kmem_cache_free_debug()
2041 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { in kmem_cache_free_debug()
2042 kmem_error(KMERR_BADBUFCTL, cp, buf); in kmem_cache_free_debug()
2047 kmem_error(KMERR_REDZONE, cp, buf); in kmem_cache_free_debug()
2051 if (cp->cache_flags & KMF_AUDIT) { in kmem_cache_free_debug()
2052 if (cp->cache_flags & KMF_CONTENTS) in kmem_cache_free_debug()
2054 buf, cp->cache_contents); in kmem_cache_free_debug()
2055 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_cache_free_debug()
2058 if ((cp->cache_flags & KMF_LITE) && in kmem_cache_free_debug()
2059 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { in kmem_cache_free_debug()
2063 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_free_debug()
2064 if (cp->cache_flags & KMF_LITE) in kmem_cache_free_debug()
2066 else if (cp->cache_destructor != NULL) in kmem_cache_free_debug()
2067 cp->cache_destructor(buf, cp->cache_private); in kmem_cache_free_debug()
2069 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_cache_free_debug()
2079 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds) in kmem_magazine_destroy() argument
2083 ASSERT(!list_link_active(&cp->cache_link) || in kmem_magazine_destroy()
2089 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_magazine_destroy()
2091 cp->cache_verify) != NULL) { in kmem_magazine_destroy()
2092 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_magazine_destroy()
2095 if ((cp->cache_flags & KMF_LITE) && in kmem_magazine_destroy()
2096 cp->cache_destructor != NULL) { in kmem_magazine_destroy()
2097 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_magazine_destroy()
2099 cp->cache_destructor(buf, cp->cache_private); in kmem_magazine_destroy()
2102 } else if (cp->cache_destructor != NULL) { in kmem_magazine_destroy()
2103 cp->cache_destructor(buf, cp->cache_private); in kmem_magazine_destroy()
2106 kmem_slab_free(cp, buf); in kmem_magazine_destroy()
2108 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_magazine_destroy()
2109 kmem_cache_free(cp->cache_magtype->mt_cache, mp); in kmem_magazine_destroy()
2116 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp) in kmem_depot_alloc() argument
2126 if (!mutex_tryenter(&cp->cache_depot_lock)) { in kmem_depot_alloc()
2127 mutex_enter(&cp->cache_depot_lock); in kmem_depot_alloc()
2128 cp->cache_depot_contention++; in kmem_depot_alloc()
2132 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_depot_alloc()
2139 mutex_exit(&cp->cache_depot_lock); in kmem_depot_alloc()
2148 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp) in kmem_depot_free() argument
2150 mutex_enter(&cp->cache_depot_lock); in kmem_depot_free()
2151 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_depot_free()
2155 mutex_exit(&cp->cache_depot_lock); in kmem_depot_free()
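
kmem_depot_alloc() above measures depot lock contention rather than merely taking the lock: a failed mutex_tryenter() is followed by a blocking mutex_enter() plus a bump of cache_depot_contention, which kmem_cache_update() later compares against a threshold to decide whether larger magazines are warranted. The same pattern in pthreads (names hypothetical):

#include <pthread.h>

struct depot {
	pthread_mutex_t lock;
	unsigned long   contention;   /* models cache_depot_contention */
};

/*
 * Take the depot lock, counting only the acquisitions where another
 * thread actually made us wait.
 */
static void
depot_lock(struct depot *dp)
{
	if (pthread_mutex_trylock(&dp->lock) != 0) {
		pthread_mutex_lock(&dp->lock);
		dp->contention++;
	}
}
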
2162 kmem_depot_ws_update(kmem_cache_t *cp) in kmem_depot_ws_update() argument
2164 mutex_enter(&cp->cache_depot_lock); in kmem_depot_ws_update()
2165 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; in kmem_depot_ws_update()
2166 cp->cache_full.ml_min = cp->cache_full.ml_total; in kmem_depot_ws_update()
2167 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; in kmem_depot_ws_update()
2168 cp->cache_empty.ml_min = cp->cache_empty.ml_total; in kmem_depot_ws_update()
2169 mutex_exit(&cp->cache_depot_lock); in kmem_depot_ws_update()
2177 kmem_depot_ws_zero(kmem_cache_t *cp) in kmem_depot_ws_zero() argument
2179 mutex_enter(&cp->cache_depot_lock); in kmem_depot_ws_zero()
2180 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total; in kmem_depot_ws_zero()
2181 cp->cache_full.ml_min = cp->cache_full.ml_total; in kmem_depot_ws_zero()
2182 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total; in kmem_depot_ws_zero()
2183 cp->cache_empty.ml_min = cp->cache_empty.ml_total; in kmem_depot_ws_zero()
2184 mutex_exit(&cp->cache_depot_lock); in kmem_depot_ws_zero()
2191 kmem_depot_ws_reap(kmem_cache_t *cp) in kmem_depot_ws_reap() argument
2196 ASSERT(!list_link_active(&cp->cache_link) || in kmem_depot_ws_reap()
2199 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_depot_ws_reap()
2200 while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) in kmem_depot_ws_reap()
2201 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); in kmem_depot_ws_reap()
2203 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); in kmem_depot_ws_reap()
2204 while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) in kmem_depot_ws_reap()
2205 kmem_magazine_destroy(cp, mp, 0); in kmem_depot_ws_reap()
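
The three ws routines above implement the depot's working-set estimate. ml_min records the lowest the magazine list got during the current interval; kmem_depot_ws_update() rolls that floor into ml_reaplimit and restarts the measurement; kmem_depot_ws_reap() then frees MIN(ml_reaplimit, ml_min) magazines, the ones no recent interval needed (ws_zero makes everything reapable for a forced purge). A sketch:

#include <stdio.h>

#define	MINVAL(a, b)	((a) < (b) ? (a) : (b))

struct maglist {              /* stand-in for kmem_maglist_t */
	long total;           /* ml_total: magazines currently listed */
	long min;             /* ml_min: floor seen this interval */
	long reaplimit;       /* ml_reaplimit: previous interval's floor */
};

/* kmem_depot_ws_update(): close out one interval. */
static void
ws_update(struct maglist *ml)
{
	ml->reaplimit = ml->min;
	ml->min = ml->total;
}

/* kmem_depot_ws_reap(): magazines idle across both intervals. */
static long
ws_reap_count(const struct maglist *ml)
{
	return (MINVAL(ml->reaplimit, ml->min));
}

int
main(void)
{
	struct maglist ml = { 10, 10, 10 };

	ml.min = 4;	/* the list dipped to 4 during this interval */
	ws_update(&ml);
	ml.min = 7;	/* next interval's floor so far */
	printf("reapable: %ld\n", ws_reap_count(&ml));
	return (0);
}
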
2239 #define KMEM_DUMPCTL(cp, buf) \ argument
2240 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2293 kmem_cache_t *cp; in kmem_dump_begin() local
2297 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_dump_begin()
2298 cp = list_next(&kmem_caches, cp)) { in kmem_dump_begin()
2299 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_dump_begin()
2301 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) { in kmem_dump_begin()
2302 cp->cache_flags |= KMF_DUMPDIVERT; in kmem_dump_begin()
2308 cp->cache_flags |= KMF_DUMPUNSAFE; in kmem_dump_begin()
2357 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag) in kmem_cache_alloc_dump() argument
2364 if ((buf = cp->cache_dump.kd_freelist) != NULL) { in kmem_cache_alloc_dump()
2365 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next; in kmem_cache_alloc_dump()
2371 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align); in kmem_cache_alloc_dump()
2372 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t); in kmem_cache_alloc_dump()
2375 if (cp->cache_align < PAGESIZE) { in kmem_cache_alloc_dump()
2386 cp->cache_dump.kd_alloc_fails++; in kmem_cache_alloc_dump()
2397 if (cp->cache_constructor != NULL && in kmem_cache_alloc_dump()
2398 cp->cache_constructor(buf, cp->cache_private, kmflag) in kmem_cache_alloc_dump()
2402 cp->cache_name, (void *)cp); in kmem_cache_alloc_dump()
2408 cp->cache_dump.kd_alloc_fails++; in kmem_cache_alloc_dump()
2420 kmem_cache_free_dump(kmem_cache_t *cp, void *buf) in kmem_cache_free_dump() argument
2425 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist; in kmem_cache_free_dump()
2426 cp->cache_dump.kd_freelist = buf; in kmem_cache_free_dump()
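
The KMEM_DUMPCTL macro and the two dump routines above implement crash-dump-time allocation: a bump allocator over a preallocated dump area, with a per-cache freelist threaded through a small control struct placed (pointer-aligned) just past each buffer. A user-space sketch under those assumptions (fixed buffer size per cache; names hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of a power-of-two a (P2ROUNDUP). */
#define	P2ROUNDUP(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct dumpctl {                 /* stand-in for kmem_dumpctl_t */
	void *kdc_next;
};

#define	BUFSIZE	100             /* fixed per cache, like cache_bufsize */

static char dump_area[4096];
static char *dump_curr = dump_area;
static void *freelist;          /* models cache_dump.kd_freelist */

static struct dumpctl *
dumpctl(void *buf)
{
	return ((struct dumpctl *)P2ROUNDUP((uintptr_t)buf + BUFSIZE,
	    sizeof (void *)));
}

static void *
dump_alloc(size_t align)
{
	char *buf, *end;

	if (freelist != NULL) {                /* reuse a freed buffer */
		buf = freelist;
		freelist = dumpctl(buf)->kdc_next;
		return (buf);
	}
	buf = (char *)P2ROUNDUP((uintptr_t)dump_curr, align);
	end = (char *)(dumpctl(buf) + 1);
	if (end > dump_area + sizeof (dump_area))
		return (NULL);                 /* kd_alloc_fails++ */
	dump_curr = end;
	return (buf);
}

static void
dump_free(void *buf)
{
	dumpctl(buf)->kdc_next = freelist;
	freelist = buf;
}

int
main(void)
{
	void *a = dump_alloc(16);

	dump_free(a);
	printf("refill from freelist: %d\n", dump_alloc(16) == a);
	return (0);
}
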
2442 kmem_cache_alloc(kmem_cache_t *cp, int kmflag) in kmem_cache_alloc() argument
2444 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_cache_alloc()
2462 cp->cache_dump.kd_unsafe++; in kmem_cache_alloc()
2465 kmem_cache_alloc_debug(cp, buf, kmflag, 0, in kmem_cache_alloc()
2493 cp->cache_dump.kd_unsafe++; in kmem_cache_alloc()
2495 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) != in kmem_cache_alloc()
2513 fmp = kmem_depot_alloc(cp, &cp->cache_full); in kmem_cache_alloc()
2516 kmem_depot_free(cp, &cp->cache_empty, in kmem_cache_alloc()
2534 buf = kmem_slab_alloc(cp, kmflag); in kmem_cache_alloc()
2539 if (cp->cache_flags & KMF_BUFTAG) { in kmem_cache_alloc()
2543 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller()); in kmem_cache_alloc()
2554 return (kmem_cache_alloc(cp, kmflag)); in kmem_cache_alloc()
2559 if (cp->cache_constructor != NULL && in kmem_cache_alloc()
2560 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) { in kmem_cache_alloc()
2561 atomic_inc_64(&cp->cache_alloc_fail); in kmem_cache_alloc()
2562 kmem_slab_free(cp, buf); in kmem_cache_alloc()
2576 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed) in kmem_slab_free_constructed() argument
2578 if (!freed && (cp->cache_flags & KMF_BUFTAG)) in kmem_slab_free_constructed()
2579 if (kmem_cache_free_debug(cp, buf, caller()) == -1) in kmem_slab_free_constructed()
2586 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF && in kmem_slab_free_constructed()
2587 cp->cache_destructor != NULL) { in kmem_slab_free_constructed()
2588 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */ in kmem_slab_free_constructed()
2589 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_slab_free_constructed()
2591 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_free_constructed()
2594 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_free_constructed()
2598 kmem_slab_free(cp, buf); in kmem_slab_free_constructed()
2609 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp) in kmem_cpucache_magazine_alloc() argument
2620 emp = kmem_depot_alloc(cp, &cp->cache_empty); in kmem_cpucache_magazine_alloc()
2623 kmem_depot_free(cp, &cp->cache_full, in kmem_cpucache_magazine_alloc()
2634 mtp = cp->cache_magtype; in kmem_cpucache_magazine_alloc()
2657 kmem_depot_free(cp, &cp->cache_empty, emp); in kmem_cpucache_magazine_alloc()
2672 kmem_cache_free(kmem_cache_t *cp, void *buf) in kmem_cache_free() argument
2674 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_cache_free()
2680 ASSERT(cp->cache_defrag == NULL || in kmem_cache_free()
2681 cp->cache_defrag->kmd_thread != curthread || in kmem_cache_free()
2682 (buf != cp->cache_defrag->kmd_from_buf && in kmem_cache_free()
2683 buf != cp->cache_defrag->kmd_to_buf)); in kmem_cache_free()
2689 cp->cache_dump.kd_unsafe++; in kmem_cache_free()
2690 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) { in kmem_cache_free()
2694 if (kmem_cache_free_debug(cp, buf, caller()) == -1) in kmem_cache_free()
2730 if (!kmem_cpucache_magazine_alloc(ccp, cp)) { in kmem_cache_free()
2740 kmem_slab_free_constructed(cp, buf, B_TRUE); in kmem_cache_free()
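
The kmem_cache_free() fragments above run the classic magazine fast path: push the buffer into the CPU's loaded magazine if it has room, otherwise exchange loaded with the previous magazine, and only when both are full fall through to kmem_cpucache_magazine_alloc() (fetch an empty magazine from the depot) or, failing that, kmem_slab_free_constructed(). A single-threaded sketch of that layering (sizes and names illustrative):

#include <stdio.h>

#define	MAGSIZE	4

struct magazine {                 /* a fixed-size stack of free buffers */
	void *rounds[MAGSIZE];
	int nrounds;
};

struct cpu_cache {                /* stand-in for kmem_cpu_cache_t */
	struct magazine *loaded;      /* cc_loaded */
	struct magazine *previous;    /* cc_ploaded */
};

/*
 * Fast-path free: returns 1 if the buffer went into a magazine, 0 if
 * both magazines are full and the caller must visit the depot or the
 * slab layer.
 */
static int
cpucache_free(struct cpu_cache *ccp, void *buf)
{
	struct magazine *tmp;

	if (ccp->loaded->nrounds < MAGSIZE) {
		ccp->loaded->rounds[ccp->loaded->nrounds++] = buf;
		return (1);
	}
	if (ccp->previous->nrounds < MAGSIZE) {
		tmp = ccp->loaded;            /* swap loaded and previous */
		ccp->loaded = ccp->previous;
		ccp->previous = tmp;
		ccp->loaded->rounds[ccp->loaded->nrounds++] = buf;
		return (1);
	}
	return (0);
}

int
main(void)
{
	struct magazine m0 = { { 0 }, 0 }, m1 = { { 0 }, 0 };
	struct cpu_cache cc = { &m0, &m1 };
	int i, x;

	for (i = 0; i < 10; i++)
		if (cpucache_free(&cc, &x) == 0)
			printf("free %d fell through to the depot path\n", i);
	return (0);
}
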
2744 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp) in kmem_slab_prefill() argument
2746 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_slab_prefill()
2747 int cache_flags = cp->cache_flags; in kmem_slab_prefill()
2757 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_prefill()
2759 ASSERT(cp->cache_constructor == NULL); in kmem_slab_prefill()
2760 ASSERT(sp->slab_cache == cp); in kmem_slab_prefill()
2763 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL); in kmem_slab_prefill()
2769 cp->cache_bufslab -= nbufs; in kmem_slab_prefill()
2770 cp->cache_slab_alloc += nbufs; in kmem_slab_prefill()
2771 list_insert_head(&cp->cache_complete_slabs, sp); in kmem_slab_prefill()
2772 cp->cache_complete_slab_count++; in kmem_slab_prefill()
2773 mutex_exit(&cp->cache_lock); in kmem_slab_prefill()
2777 void *buf = KMEM_BUF(cp, head); in kmem_slab_prefill()
2811 if (!kmem_cpucache_magazine_alloc(ccp, cp)) in kmem_slab_prefill()
2826 kmem_slab_free(cp, KMEM_BUF(cp, head)); in kmem_slab_prefill()
2833 mutex_enter(&cp->cache_lock); in kmem_slab_prefill()
2843 kmem_cache_t *cp = kmem_alloc_table[index]; in kmem_zalloc() local
2844 buf = kmem_cache_alloc(cp, kmflag); in kmem_zalloc()
2846 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { in kmem_zalloc()
2847 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_zalloc()
2851 if (cp->cache_flags & KMF_LITE) { in kmem_zalloc()
2870 kmem_cache_t *cp; in kmem_alloc() local
2874 cp = kmem_alloc_table[index]; in kmem_alloc()
2879 cp = kmem_big_alloc_table[index]; in kmem_alloc()
2900 buf = kmem_cache_alloc(cp, kmflag); in kmem_alloc()
2901 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) { in kmem_alloc()
2902 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_alloc()
2906 if (cp->cache_flags & KMF_LITE) { in kmem_alloc()
2917 kmem_cache_t *cp; in kmem_free() local
2920 cp = kmem_alloc_table[index]; in kmem_free()
2925 cp = kmem_big_alloc_table[index]; in kmem_free()
2936 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { in kmem_free()
2937 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_free()
2941 kmem_error(KMERR_DUPFREE, cp, buf); in kmem_free()
2946 kmem_error(KMERR_BADSIZE, cp, buf); in kmem_free()
2948 kmem_error(KMERR_REDZONE, cp, buf); in kmem_free()
2953 kmem_error(KMERR_REDZONE, cp, buf); in kmem_free()
2957 if (cp->cache_flags & KMF_LITE) { in kmem_free()
2962 kmem_cache_free(cp, buf); in kmem_free()
3035 kmem_cache_reap(kmem_cache_t *cp) in kmem_cache_reap() argument
3038 cp->cache_reap++; in kmem_cache_reap()
3047 if (cp->cache_reclaim != NULL) { in kmem_cache_reap()
3054 delta = cp->cache_full.ml_total; in kmem_cache_reap()
3055 cp->cache_reclaim(cp->cache_private); in kmem_cache_reap()
3056 delta = cp->cache_full.ml_total - delta; in kmem_cache_reap()
3058 mutex_enter(&cp->cache_depot_lock); in kmem_cache_reap()
3059 cp->cache_full.ml_reaplimit += delta; in kmem_cache_reap()
3060 cp->cache_full.ml_min += delta; in kmem_cache_reap()
3061 mutex_exit(&cp->cache_depot_lock); in kmem_cache_reap()
3065 kmem_depot_ws_reap(cp); in kmem_cache_reap()
3067 if (cp->cache_defrag != NULL && !kmem_move_noreap) { in kmem_cache_reap()
3068 kmem_cache_defrag(cp); in kmem_cache_reap()
3165 kmem_cache_magazine_purge(kmem_cache_t *cp) in kmem_cache_magazine_purge() argument
3171 ASSERT(!list_link_active(&cp->cache_link) || in kmem_cache_magazine_purge()
3173 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_cache_magazine_purge()
3176 ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_magazine_purge()
3191 kmem_magazine_destroy(cp, mp, rounds); in kmem_cache_magazine_purge()
3193 kmem_magazine_destroy(cp, pmp, prounds); in kmem_cache_magazine_purge()
3196 kmem_depot_ws_zero(cp); in kmem_cache_magazine_purge()
3197 kmem_depot_ws_reap(cp); in kmem_cache_magazine_purge()
3204 kmem_cache_magazine_enable(kmem_cache_t *cp) in kmem_cache_magazine_enable() argument
3208 if (cp->cache_flags & KMF_NOMAGAZINE) in kmem_cache_magazine_enable()
3212 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_magazine_enable()
3214 ccp->cc_magsize = cp->cache_magtype->mt_magsize; in kmem_cache_magazine_enable()
3224 kmem_cache_reap_now(kmem_cache_t *cp) in kmem_cache_reap_now() argument
3226 ASSERT(list_link_active(&cp->cache_link)); in kmem_cache_reap_now()
3228 kmem_depot_ws_zero(cp); in kmem_cache_reap_now()
3231 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP); in kmem_cache_reap_now()
3247 kmem_cache_magazine_resize(kmem_cache_t *cp) in kmem_cache_magazine_resize() argument
3249 kmem_magtype_t *mtp = cp->cache_magtype; in kmem_cache_magazine_resize()
3253 if (cp->cache_chunksize < mtp->mt_maxbuf) { in kmem_cache_magazine_resize()
3254 kmem_cache_magazine_purge(cp); in kmem_cache_magazine_resize()
3255 mutex_enter(&cp->cache_depot_lock); in kmem_cache_magazine_resize()
3256 cp->cache_magtype = ++mtp; in kmem_cache_magazine_resize()
3257 cp->cache_depot_contention_prev = in kmem_cache_magazine_resize()
3258 cp->cache_depot_contention + INT_MAX; in kmem_cache_magazine_resize()
3259 mutex_exit(&cp->cache_depot_lock); in kmem_cache_magazine_resize()
3260 kmem_cache_magazine_enable(cp); in kmem_cache_magazine_resize()
3269 kmem_hash_rescale(kmem_cache_t *cp) in kmem_hash_rescale() argument
3277 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); in kmem_hash_rescale()
3278 old_size = cp->cache_hash_mask + 1; in kmem_hash_rescale()
3289 mutex_enter(&cp->cache_lock); in kmem_hash_rescale()
3291 old_size = cp->cache_hash_mask + 1; in kmem_hash_rescale()
3292 old_table = cp->cache_hash_table; in kmem_hash_rescale()
3294 cp->cache_hash_mask = new_size - 1; in kmem_hash_rescale()
3295 cp->cache_hash_table = new_table; in kmem_hash_rescale()
3296 cp->cache_rescale++; in kmem_hash_rescale()
3303 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr); in kmem_hash_rescale()
3310 mutex_exit(&cp->cache_lock); in kmem_hash_rescale()
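
kmem_hash_rescale() above sizes the new table to a power of two near the buffer population (the 1 << (highbit(3 * cp->cache_buftotal + 4) - 2) expression), swaps it in under cache_lock, and then rechains every bufctl into its new bucket via KMEM_HASH. A sketch of that rechaining loop; hash() here is an illustrative stand-in for the kernel's macro:

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

struct bufctl {                   /* stand-in for kmem_bufctl_t */
	struct bufctl *bc_next;
	void *bc_addr;
};

static size_t
hash(void *addr, size_t mask)
{
	return (((uintptr_t)addr >> 4) & mask);
}

/*
 * Move every chain element from the old table into a freshly zeroed
 * table of new_size buckets; new_size must be a power of two.
 */
static struct bufctl **
rehash(struct bufctl **old, size_t old_size, size_t new_size)
{
	struct bufctl **new_table = calloc(new_size, sizeof (*new_table));
	struct bufctl *bcp, *next, **bucket;
	size_t i;

	if (new_table == NULL)
		return (NULL);
	for (i = 0; i < old_size; i++) {
		for (bcp = old[i]; bcp != NULL; bcp = next) {
			next = bcp->bc_next;
			bucket = &new_table[hash(bcp->bc_addr, new_size - 1)];
			bcp->bc_next = *bucket;
			*bucket = bcp;
		}
	}
	return (new_table);
}

int
main(void)
{
	struct bufctl b = { NULL, (void *)(uintptr_t)0x30 };
	struct bufctl a = { &b, (void *)(uintptr_t)0x10 };
	struct bufctl *old[1] = { &a };
	struct bufctl **new_table = rehash(old, 1, 4);

	printf("bucket1 %p bucket3 %p\n", (void *)new_table[1],
	    (void *)new_table[3]);
	free(new_table);
	return (0);
}
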
3320 kmem_cache_update(kmem_cache_t *cp) in kmem_cache_update() argument
3331 mutex_enter(&cp->cache_lock); in kmem_cache_update()
3333 if ((cp->cache_flags & KMF_HASH) && in kmem_cache_update()
3334 (cp->cache_buftotal > (cp->cache_hash_mask << 1) || in kmem_cache_update()
3335 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && in kmem_cache_update()
3336 cp->cache_hash_mask > KMEM_HASH_INITIAL))) in kmem_cache_update()
3339 mutex_exit(&cp->cache_lock); in kmem_cache_update()
3344 kmem_depot_ws_update(cp); in kmem_cache_update()
3350 mutex_enter(&cp->cache_depot_lock); in kmem_cache_update()
3352 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && in kmem_cache_update()
3353 (int)(cp->cache_depot_contention - in kmem_cache_update()
3354 cp->cache_depot_contention_prev) > kmem_depot_contention) in kmem_cache_update()
3357 cp->cache_depot_contention_prev = cp->cache_depot_contention; in kmem_cache_update()
3359 mutex_exit(&cp->cache_depot_lock); in kmem_cache_update()
3363 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP); in kmem_cache_update()
3367 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP); in kmem_cache_update()
3369 if (cp->cache_defrag != NULL) in kmem_cache_update()
3371 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP); in kmem_cache_update()
3400 kmem_cache_t *cp = ksp->ks_private; in kmem_cache_kstat_update() local
3411 mutex_enter(&cp->cache_lock); in kmem_cache_kstat_update()
3413 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail; in kmem_cache_kstat_update()
3414 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc; in kmem_cache_kstat_update()
3415 kmcp->kmc_free.value.ui64 = cp->cache_slab_free; in kmem_cache_kstat_update()
3416 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc; in kmem_cache_kstat_update()
3417 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free; in kmem_cache_kstat_update()
3420 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_kstat_update()
3437 mutex_enter(&cp->cache_depot_lock); in kmem_cache_kstat_update()
3439 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc; in kmem_cache_kstat_update()
3440 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc; in kmem_cache_kstat_update()
3441 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention; in kmem_cache_kstat_update()
3442 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total; in kmem_cache_kstat_update()
3443 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total; in kmem_cache_kstat_update()
3445 (cp->cache_flags & KMF_NOMAGAZINE) ? in kmem_cache_kstat_update()
3446 0 : cp->cache_magtype->mt_magsize; in kmem_cache_kstat_update()
3448 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc; in kmem_cache_kstat_update()
3449 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc; in kmem_cache_kstat_update()
3450 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize; in kmem_cache_kstat_update()
3452 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_cache_kstat_update()
3453 reap = MIN(reap, cp->cache_full.ml_total); in kmem_cache_kstat_update()
3455 mutex_exit(&cp->cache_depot_lock); in kmem_cache_kstat_update()
3457 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize; in kmem_cache_kstat_update()
3458 kmcp->kmc_align.value.ui64 = cp->cache_align; in kmem_cache_kstat_update()
3459 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize; in kmem_cache_kstat_update()
3460 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize; in kmem_cache_kstat_update()
3462 buf_avail += cp->cache_bufslab; in kmem_cache_kstat_update()
3464 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail; in kmem_cache_kstat_update()
3465 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal; in kmem_cache_kstat_update()
3466 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax; in kmem_cache_kstat_update()
3467 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create; in kmem_cache_kstat_update()
3468 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy; in kmem_cache_kstat_update()
3469 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ? in kmem_cache_kstat_update()
3470 cp->cache_hash_mask + 1 : 0; in kmem_cache_kstat_update()
3471 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth; in kmem_cache_kstat_update()
3472 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale; in kmem_cache_kstat_update()
3473 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id; in kmem_cache_kstat_update()
3474 kmcp->kmc_reap.value.ui64 = cp->cache_reap; in kmem_cache_kstat_update()
3476 if (cp->cache_defrag == NULL) { in kmem_cache_kstat_update()
3491 kmem_defrag_t *kd = cp->cache_defrag; in kmem_cache_kstat_update()
3503 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1); in kmem_cache_kstat_update()
3505 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize); in kmem_cache_kstat_update()
3509 mutex_exit(&cp->cache_lock); in kmem_cache_kstat_update()
3519 kmem_cache_stat(kmem_cache_t *cp, char *name) in kmem_cache_stat() argument
3522 kstat_t *ksp = cp->cache_kstat; in kmem_cache_stat()
3609 const kmem_cache_t *cp; in kmem_partial_slab_cmp() local
3618 cp = s1->slab_cache; in kmem_partial_slab_cmp()
3619 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_partial_slab_cmp()
3620 binshift = cp->cache_partial_binshift; in kmem_partial_slab_cmp()
3625 w0 -= cp->cache_maxchunks; in kmem_partial_slab_cmp()
3631 w1 -= cp->cache_maxchunks; in kmem_partial_slab_cmp()
3667 kmem_cache_t *cp; in kmem_cache_create() local
3699 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE, in kmem_cache_create()
3701 bzero(cp, csize); in kmem_cache_create()
3702 list_link_init(&cp->cache_link); in kmem_cache_create()
3722 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG; in kmem_cache_create()
3730 if (cp->cache_flags & KMF_LITE) { in kmem_cache_create()
3734 cp->cache_flags |= KMF_BUFTAG; in kmem_cache_create()
3735 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); in kmem_cache_create()
3737 cp->cache_flags &= ~KMF_DEBUG; in kmem_cache_create()
3741 if (cp->cache_flags & KMF_DEADBEEF) in kmem_cache_create()
3742 cp->cache_flags |= KMF_REDZONE; in kmem_cache_create()
3744 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT)) in kmem_cache_create()
3745 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3748 cp->cache_flags &= ~KMF_DEBUG; in kmem_cache_create()
3751 cp->cache_flags &= ~KMF_TOUCH; in kmem_cache_create()
3754 cp->cache_flags |= KMF_PREFILL; in kmem_cache_create()
3757 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); in kmem_cache_create()
3760 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3762 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH)) in kmem_cache_create()
3763 cp->cache_flags |= KMF_REDZONE; in kmem_cache_create()
3765 if (!(cp->cache_flags & KMF_AUDIT)) in kmem_cache_create()
3766 cp->cache_flags &= ~KMF_CONTENTS; in kmem_cache_create()
3768 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall && in kmem_cache_create()
3769 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH)) in kmem_cache_create()
3770 cp->cache_flags |= KMF_FIREWALL; in kmem_cache_create()
3773 cp->cache_flags &= ~KMF_FIREWALL; in kmem_cache_create()
3775 if (cp->cache_flags & KMF_FIREWALL) { in kmem_cache_create()
3776 cp->cache_flags &= ~KMF_BUFTAG; in kmem_cache_create()
3777 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3785 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN); in kmem_cache_create()
3786 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1); in kmem_cache_create()
3787 cp->cache_bufsize = bufsize; in kmem_cache_create()
3788 cp->cache_align = align; in kmem_cache_create()
3789 cp->cache_constructor = constructor; in kmem_cache_create()
3790 cp->cache_destructor = destructor; in kmem_cache_create()
3791 cp->cache_reclaim = reclaim; in kmem_cache_create()
3792 cp->cache_private = private; in kmem_cache_create()
3793 cp->cache_arena = vmp; in kmem_cache_create()
3794 cp->cache_cflags = cflags; in kmem_cache_create()
3803 cp->cache_bufctl = chunksize - KMEM_ALIGN; in kmem_cache_create()
3806 if (cp->cache_flags & KMF_BUFTAG) { in kmem_cache_create()
3807 cp->cache_bufctl = chunksize; in kmem_cache_create()
3808 cp->cache_buftag = chunksize; in kmem_cache_create()
3809 if (cp->cache_flags & KMF_LITE) in kmem_cache_create()
3815 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_create()
3816 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify); in kmem_cache_create()
3817 if (cp->cache_flags & KMF_LITE) in kmem_cache_create()
3818 cp->cache_verify = sizeof (uint64_t); in kmem_cache_create()
3821 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave); in kmem_cache_create()
3823 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); in kmem_cache_create()
3829 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); in kmem_cache_create()
3830 cp->cache_mincolor = cp->cache_slabsize - chunksize; in kmem_cache_create()
3831 cp->cache_maxcolor = cp->cache_mincolor; in kmem_cache_create()
3832 cp->cache_flags |= KMF_HASH; in kmem_cache_create()
3833 ASSERT(!(cp->cache_flags & KMF_BUFTAG)); in kmem_cache_create()
3835 !(cp->cache_flags & KMF_AUDIT) && in kmem_cache_create()
3837 cp->cache_slabsize = vmp->vm_quantum; in kmem_cache_create()
3838 cp->cache_mincolor = 0; in kmem_cache_create()
3839 cp->cache_maxcolor = in kmem_cache_create()
3840 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize; in kmem_cache_create()
3841 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize); in kmem_cache_create()
3842 ASSERT(!(cp->cache_flags & KMF_AUDIT)); in kmem_cache_create()
3859 cp->cache_slabsize = bestfit; in kmem_cache_create()
3860 cp->cache_mincolor = 0; in kmem_cache_create()
3861 cp->cache_maxcolor = bestfit % chunksize; in kmem_cache_create()
3862 cp->cache_flags |= KMF_HASH; in kmem_cache_create()
3865 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize); in kmem_cache_create()
3866 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1; in kmem_cache_create()
3875 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) || in kmem_cache_create()
3876 cp->cache_constructor != NULL) in kmem_cache_create()
3877 cp->cache_flags &= ~KMF_PREFILL; in kmem_cache_create()
3879 if (cp->cache_flags & KMF_HASH) { in kmem_cache_create()
3881 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ? in kmem_cache_create()
3885 if (cp->cache_maxcolor >= vmp->vm_quantum) in kmem_cache_create()
3886 cp->cache_maxcolor = vmp->vm_quantum - 1; in kmem_cache_create()
3888 cp->cache_color = cp->cache_mincolor; in kmem_cache_create()
3893 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL); in kmem_cache_create()
3895 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp, in kmem_cache_create()
3900 list_create(&cp->cache_complete_slabs, in kmem_cache_create()
3903 if (cp->cache_flags & KMF_HASH) { in kmem_cache_create()
3904 cp->cache_hash_table = vmem_alloc(kmem_hash_arena, in kmem_cache_create()
3906 bzero(cp->cache_hash_table, in kmem_cache_create()
3908 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1; in kmem_cache_create()
3909 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; in kmem_cache_create()
3915 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL); in kmem_cache_create()
3920 cp->cache_magtype = mtp; in kmem_cache_create()
3926 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_create()
3928 ccp->cc_flags = cp->cache_flags; in kmem_cache_create()
3936 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name, in kmem_cache_create()
3940 cp->cache_kstat->ks_data = &kmem_cache_kstat; in kmem_cache_create()
3941 cp->cache_kstat->ks_update = kmem_cache_kstat_update; in kmem_cache_create()
3942 cp->cache_kstat->ks_private = cp; in kmem_cache_create()
3943 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock; in kmem_cache_create()
3944 kstat_install(cp->cache_kstat); in kmem_cache_create()
3952 list_insert_tail(&kmem_caches, cp); in kmem_cache_create()
3956 kmem_cache_magazine_enable(cp); in kmem_cache_create()
3958 return (cp); in kmem_cache_create()
4001 kmem_cache_set_move(kmem_cache_t *cp, in kmem_cache_set_move() argument
4013 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH)); in kmem_cache_set_move()
4014 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER)); in kmem_cache_set_move()
4023 mutex_enter(&cp->cache_lock); in kmem_cache_set_move()
4025 if (KMEM_IS_MOVABLE(cp)) { in kmem_cache_set_move()
4026 if (cp->cache_move == NULL) { in kmem_cache_set_move()
4027 ASSERT(cp->cache_slab_alloc == 0); in kmem_cache_set_move()
4029 cp->cache_defrag = defrag; in kmem_cache_set_move()
4031 bzero(cp->cache_defrag, sizeof (kmem_defrag_t)); in kmem_cache_set_move()
4032 avl_create(&cp->cache_defrag->kmd_moves_pending, in kmem_cache_set_move()
4038 list_create(&cp->cache_defrag->kmd_deadlist, in kmem_cache_set_move()
4041 kmem_reset_reclaim_threshold(cp->cache_defrag); in kmem_cache_set_move()
4043 cp->cache_move = move; in kmem_cache_set_move()
4046 mutex_exit(&cp->cache_lock); in kmem_cache_set_move()
4054 kmem_cache_destroy(kmem_cache_t *cp) in kmem_cache_destroy() argument
4064 list_remove(&kmem_caches, cp); in kmem_cache_destroy()
4072 kmem_cache_magazine_purge(cp); in kmem_cache_destroy()
4074 mutex_enter(&cp->cache_lock); in kmem_cache_destroy()
4075 if (cp->cache_buftotal != 0) in kmem_cache_destroy()
4077 cp->cache_name, (void *)cp); in kmem_cache_destroy()
4078 if (cp->cache_defrag != NULL) { in kmem_cache_destroy()
4079 avl_destroy(&cp->cache_defrag->kmd_moves_pending); in kmem_cache_destroy()
4080 list_destroy(&cp->cache_defrag->kmd_deadlist); in kmem_cache_destroy()
4081 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag); in kmem_cache_destroy()
4082 cp->cache_defrag = NULL; in kmem_cache_destroy()
4090 cp->cache_constructor = (int (*)(void *, void *, int))1; in kmem_cache_destroy()
4091 cp->cache_destructor = (void (*)(void *, void *))2; in kmem_cache_destroy()
4092 cp->cache_reclaim = (void (*)(void *))3; in kmem_cache_destroy()
4093 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4; in kmem_cache_destroy()
4094 mutex_exit(&cp->cache_lock); in kmem_cache_destroy()
4096 kstat_delete(cp->cache_kstat); in kmem_cache_destroy()
4098 if (cp->cache_hash_table != NULL) in kmem_cache_destroy()
4099 vmem_free(kmem_hash_arena, cp->cache_hash_table, in kmem_cache_destroy()
4100 (cp->cache_hash_mask + 1) * sizeof (void *)); in kmem_cache_destroy()
4103 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); in kmem_cache_destroy()
4105 mutex_destroy(&cp->cache_depot_lock); in kmem_cache_destroy()
4106 mutex_destroy(&cp->cache_lock); in kmem_cache_destroy()
4108 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus)); in kmem_cache_destroy()
4137 kmem_cache_t *cp; in kmem_alloc_caches_create() local
4156 cp = kmem_cache_create(name, cache_size, align, in kmem_alloc_caches_create()
4160 alloc_table[(size - 1) >> shift] = cp; in kmem_alloc_caches_create()
4269 kmem_cache_t *cp; in kmem_init() local
4338 while ((cp = list_tail(&kmem_caches)) != NULL) in kmem_init()
4339 kmem_cache_destroy(cp); in kmem_init()
4524 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf) in kmem_slab_allocated() argument
4528 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_allocated()
4531 if (cp->cache_flags & KMF_HASH) { in kmem_slab_allocated()
4532 for (bcp = *KMEM_HASH(cp, buf); in kmem_slab_allocated()
4542 sp = KMEM_SLAB(cp, buf); in kmem_slab_allocated()
4544 bufbcp = KMEM_BUFCTL(cp, buf); in kmem_slab_allocated()
4554 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags) in kmem_slab_is_reclaimable() argument
4558 ASSERT(cp->cache_defrag != NULL); in kmem_slab_is_reclaimable()
4593 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer)); in kmem_slab_is_reclaimable()
4597 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf, in kmem_hunt_mag() argument
4604 if (cp->cache_flags & KMF_BUFTAG) { in kmem_hunt_mag()
4605 (void) kmem_cache_free_debug(cp, tbuf, in kmem_hunt_mag()
4622 kmem_hunt_mags(kmem_cache_t *cp, void *buf) in kmem_hunt_mags() argument
4630 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_hunt_mags()
4636 tbuf = kmem_cache_alloc(cp, KM_NOSLEEP); in kmem_hunt_mags()
4643 if (cp->cache_flags & KMF_BUFTAG) { in kmem_hunt_mags()
4644 (void) kmem_cache_free_debug(cp, buf, caller()); in kmem_hunt_mags()
4650 mutex_enter(&cp->cache_depot_lock); in kmem_hunt_mags()
4651 n = cp->cache_magtype->mt_magsize; in kmem_hunt_mags()
4652 for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) { in kmem_hunt_mags()
4653 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) { in kmem_hunt_mags()
4654 mutex_exit(&cp->cache_depot_lock); in kmem_hunt_mags()
4658 mutex_exit(&cp->cache_depot_lock); in kmem_hunt_mags()
4662 ccp = &cp->cache_cpu[cpu_seqid]; in kmem_hunt_mags()
4667 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) { in kmem_hunt_mags()
4673 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) { in kmem_hunt_mags()
4680 kmem_cache_free(cp, tbuf); in kmem_hunt_mags()
4689 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) in kmem_slab_move_yes() argument
4691 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_move_yes()
4700 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_move_yes()
4703 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_move_yes()
4712 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) in kmem_slab_move_no() argument
4715 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_move_no()
4722 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_move_no()
4726 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_move_no()
4767 kmem_cache_t *cp = sp->slab_cache; in kmem_move_buffer() local
4771 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_buffer()
4780 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) { in kmem_move_buffer()
4784 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4785 kmem_move_end(cp, callback); in kmem_move_buffer()
4795 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4796 free_on_slab = (kmem_slab_allocated(cp, sp, in kmem_move_buffer()
4798 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4802 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4803 kmem_move_end(cp, callback); in kmem_move_buffer()
4807 if (cp->cache_flags & KMF_BUFTAG) { in kmem_move_buffer()
4811 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf, in kmem_move_buffer()
4814 kmem_move_end(cp, callback); in kmem_move_buffer()
4817 } else if (cp->cache_constructor != NULL && in kmem_move_buffer()
4818 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private, in kmem_move_buffer()
4820 atomic_inc_64(&cp->cache_alloc_fail); in kmem_move_buffer()
4822 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4823 kmem_move_end(cp, callback); in kmem_move_buffer()
4830 cp->cache_defrag->kmd_callbacks++; in kmem_move_buffer()
4831 cp->cache_defrag->kmd_thread = curthread; in kmem_move_buffer()
4832 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf; in kmem_move_buffer()
4833 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf; in kmem_move_buffer()
4834 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *, in kmem_move_buffer()
4837 response = cp->cache_move(callback->kmm_from_buf, in kmem_move_buffer()
4838 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private); in kmem_move_buffer()
4840 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *, in kmem_move_buffer()
4842 cp->cache_defrag->kmd_thread = NULL; in kmem_move_buffer()
4843 cp->cache_defrag->kmd_from_buf = NULL; in kmem_move_buffer()
4844 cp->cache_defrag->kmd_to_buf = NULL; in kmem_move_buffer()
4848 cp->cache_defrag->kmd_yes++; in kmem_move_buffer()
4849 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); in kmem_move_buffer()
4852 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4853 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4854 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4855 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4856 kmem_move_end(cp, callback); in kmem_move_buffer()
4863 cp->cache_defrag->kmd_no++; in kmem_move_buffer()
4864 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4865 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4866 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4870 cp->cache_defrag->kmd_later++; in kmem_move_buffer()
4871 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4873 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4879 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4884 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4888 cp->cache_defrag->kmd_dont_need++; in kmem_move_buffer()
4889 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); in kmem_move_buffer()
4891 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4892 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4893 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4894 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4898 cp->cache_defrag->kmd_dont_know++; in kmem_move_buffer()
4899 if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) { in kmem_move_buffer()
4901 cp->cache_defrag->kmd_hunt_found++; in kmem_move_buffer()
4902 kmem_slab_free_constructed(cp, callback->kmm_from_buf, in kmem_move_buffer()
4905 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4906 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4907 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4908 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4913 cp->cache_name, (void *)cp, response); in kmem_move_buffer()
4916 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE); in kmem_move_buffer()
4917 kmem_move_end(cp, callback); in kmem_move_buffer()
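
The long tail of kmem_move_buffer() above is a five-way dispatch on the client callback's verdict, counted in kmd_yes, kmd_no, kmd_later, kmd_dont_need and kmd_dont_know (the callback's return type, kmem_cbrc_t, is visible in the kmem_cache_destroy fragment above). A compact restatement of the dispatch, with illustrative enum and action names:

#include <stdio.h>

typedef enum {
	CBRC_YES,        /* client vacated from_buf */
	CBRC_NO,         /* buffer is pinned; give up on this slab */
	CBRC_LATER,      /* busy right now; retry on a later scan */
	CBRC_DONT_NEED,  /* client no longer uses the buffer */
	CBRC_DONT_KNOW   /* client cannot identify the buffer */
} cbrc_t;

static const char *
verdict_action(cbrc_t v)
{
	switch (v) {
	case CBRC_YES:
		return ("free from_buf, credit kmd_slabs_freed");
	case CBRC_NO:
		return ("kmem_slab_move_no: stop reclaiming this slab");
	case CBRC_LATER:
		return ("leave pending, revisit on a later scan");
	case CBRC_DONT_NEED:
		return ("free from_buf immediately");
	case CBRC_DONT_KNOW:
		return ("kmem_hunt_mags: look for it in the magazine layer");
	}
	return ("invalid verdict: report cache and response, then fail");
}

int
main(void)
{
	int v;

	for (v = CBRC_YES; v <= CBRC_DONT_KNOW; v++)
		printf("%d: %s\n", v, verdict_action((cbrc_t)v));
	return (0);
}
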
4922 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags) in kmem_move_begin() argument
4930 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_begin()
4943 mutex_enter(&cp->cache_lock); in kmem_move_begin()
4945 n = avl_numnodes(&cp->cache_partial_slabs); in kmem_move_begin()
4947 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4952 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index); in kmem_move_begin()
4961 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4967 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs), in kmem_move_begin()
4970 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index); in kmem_move_begin()
4972 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4977 mutex_enter(&cp->cache_lock); in kmem_move_begin()
4978 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); in kmem_move_begin()
4979 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4980 kmem_slab_free(cp, to_buf); in kmem_move_begin()
4989 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback) in kmem_move_end() argument
4993 ASSERT(cp->cache_defrag != NULL); in kmem_move_end()
4995 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_end()
4997 mutex_enter(&cp->cache_lock); in kmem_move_end()
4998 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending, in kmem_move_end()
5000 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); in kmem_move_end()
5001 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) { in kmem_move_end()
5002 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_move_end()
5018 cp->cache_defrag->kmd_deadcount--; in kmem_move_end()
5019 cp->cache_slab_destroy++; in kmem_move_end()
5020 mutex_exit(&cp->cache_lock); in kmem_move_end()
5021 kmem_slab_destroy(cp, sp); in kmem_move_end()
5023 mutex_enter(&cp->cache_lock); in kmem_move_end()
5026 mutex_exit(&cp->cache_lock); in kmem_move_end()
5045 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs, in kmem_move_buffers() argument
5058 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_move_buffers()
5060 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL); in kmem_move_buffers()
5061 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) : in kmem_move_buffers()
5062 avl_numnodes(&cp->cache_partial_slabs) > 1); in kmem_move_buffers()
5085 sp = avl_last(&cp->cache_partial_slabs); in kmem_move_buffers()
5088 ((sp != avl_first(&cp->cache_partial_slabs)) || in kmem_move_buffers()
5090 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) { in kmem_move_buffers()
5092 if (!kmem_slab_is_reclaimable(cp, sp, flags)) { in kmem_move_buffers()
5100 buf = (((char *)buf) + cp->cache_chunksize), j++) { in kmem_move_buffers()
5102 if (kmem_slab_allocated(cp, sp, buf) == NULL) { in kmem_move_buffers()
5126 mutex_exit(&cp->cache_lock); in kmem_move_buffers()
5128 success = kmem_move_begin(cp, sp, buf, flags); in kmem_move_buffers()
5142 mutex_enter(&cp->cache_lock); in kmem_move_buffers()
5148 &cp->cache_defrag->kmd_deadlist; in kmem_move_buffers()
5152 &cp->cache_defrag->kmd_moves_pending)) { in kmem_move_buffers()
5179 cp->cache_defrag->kmd_deadcount--; in kmem_move_buffers()
5180 cp->cache_slab_destroy++; in kmem_move_buffers()
5181 mutex_exit(&cp->cache_lock); in kmem_move_buffers()
5182 kmem_slab_destroy(cp, sp); in kmem_move_buffers()
5187 mutex_enter(&cp->cache_lock); in kmem_move_buffers()
5243 ASSERT(!avl_is_empty(&cp->cache_partial_slabs)); in kmem_move_buffers()
5244 if (sp == avl_first(&cp->cache_partial_slabs)) { in kmem_move_buffers()
5257 (sp == avl_first(&cp->cache_partial_slabs)), in kmem_move_buffers()
5272 kmem_cache_t *cp = args->kmna_cache; in kmem_cache_move_notify_task() local
5277 ASSERT(list_link_active(&cp->cache_link)); in kmem_cache_move_notify_task()
5280 mutex_enter(&cp->cache_lock); in kmem_cache_move_notify_task()
5281 sp = kmem_slab_allocated(cp, NULL, buf); in kmem_cache_move_notify_task()
5285 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5290 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { in kmem_cache_move_notify_task()
5298 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5302 kmem_slab_move_yes(cp, sp, buf); in kmem_cache_move_notify_task()
5305 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5307 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY); in kmem_cache_move_notify_task()
5308 mutex_enter(&cp->cache_lock); in kmem_cache_move_notify_task()
5312 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_cache_move_notify_task()
5316 &cp->cache_defrag->kmd_moves_pending)) { in kmem_cache_move_notify_task()
5318 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5324 cp->cache_defrag->kmd_deadcount--; in kmem_cache_move_notify_task()
5325 cp->cache_slab_destroy++; in kmem_cache_move_notify_task()
5326 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5327 kmem_slab_destroy(cp, sp); in kmem_cache_move_notify_task()
5334 kmem_slab_move_yes(cp, sp, buf); in kmem_cache_move_notify_task()
5336 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5340 kmem_cache_move_notify(kmem_cache_t *cp, void *buf) in kmem_cache_move_notify() argument
5347 args->kmna_cache = cp; in kmem_cache_move_notify()
5357 kmem_cache_defrag(kmem_cache_t *cp) in kmem_cache_defrag() argument
5361 ASSERT(cp->cache_defrag != NULL); in kmem_cache_defrag()
5363 mutex_enter(&cp->cache_lock); in kmem_cache_defrag()
5364 n = avl_numnodes(&cp->cache_partial_slabs); in kmem_cache_defrag()
5368 cp->cache_defrag->kmd_defrags++; in kmem_cache_defrag()
5369 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE); in kmem_cache_defrag()
5371 mutex_exit(&cp->cache_lock); in kmem_cache_defrag()
5376 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree) in kmem_cache_frag_threshold() argument
5384 (cp->cache_buftotal * kmem_frag_numer)); in kmem_cache_frag_threshold()
5388 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap) in kmem_cache_is_fragmented() argument
5393 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_cache_is_fragmented()
5397 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { in kmem_cache_is_fragmented()
5401 if ((cp->cache_complete_slab_count + avl_numnodes( in kmem_cache_is_fragmented()
5402 &cp->cache_partial_slabs)) < kmem_frag_minslabs) { in kmem_cache_is_fragmented()
5407 nfree = cp->cache_bufslab; in kmem_cache_is_fragmented()
5408 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) && in kmem_cache_is_fragmented()
5409 kmem_cache_frag_threshold(cp, nfree)); in kmem_cache_is_fragmented()
5420 mutex_enter(&cp->cache_depot_lock); in kmem_cache_is_fragmented()
5421 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_cache_is_fragmented()
5422 reap = MIN(reap, cp->cache_full.ml_total); in kmem_cache_is_fragmented()
5423 mutex_exit(&cp->cache_depot_lock); in kmem_cache_is_fragmented()
5425 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize); in kmem_cache_is_fragmented()
5426 if (kmem_cache_frag_threshold(cp, nfree)) { in kmem_cache_is_fragmented()
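
kmem_cache_frag_threshold() above avoids division by cross-multiplying: the cache is over threshold when nfree times the denominator exceeds cache_buftotal times the numerator. kmem_cache_is_fragmented() first tests cache_bufslab alone, then retries with the reapable magazine contents added in (the reap = MIN(...) lines above). A sketch with a hypothetical 1/8 ratio; the kernel's actual ratio lives in the kmem_frag_numer/kmem_frag_denom tunables, which the listing does not show:

#include <stdint.h>
#include <stdio.h>

#define	FRAG_NUMER	1
#define	FRAG_DENOM	8

static int
frag_threshold(uint64_t buftotal, uint64_t nfree)
{
	/* cross-multiplied so no division is needed */
	return (nfree * FRAG_DENOM > buftotal * FRAG_NUMER);
}

int
main(void)
{
	printf("100/1000 free: %d\n", frag_threshold(1000, 100));
	printf("200/1000 free: %d\n", frag_threshold(1000, 200));
	return (0);
}
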
5436 kmem_cache_scan(kmem_cache_t *cp) in kmem_cache_scan() argument
5443 mutex_enter(&cp->cache_lock); in kmem_cache_scan()
5445 kmd = cp->cache_defrag; in kmem_cache_scan()
5448 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5449 kmem_cache_reap(cp); in kmem_cache_scan()
5453 if (kmem_cache_is_fragmented(cp, &reap)) { in kmem_cache_scan()
5468 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range, in kmem_cache_scan()
5493 kmem_reset_reclaim_threshold(cp->cache_defrag); in kmem_cache_scan()
5495 if (!avl_is_empty(&cp->cache_partial_slabs)) { in kmem_cache_scan()
5506 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5508 kmem_cache_reap(cp); in kmem_cache_scan()
5514 (void) kmem_move_buffers(cp, in kmem_cache_scan()
5521 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5525 kmem_depot_ws_reap(cp); in kmem_cache_scan()