Lines matching refs: cp — all source lines that reference the identifier cp (a kmem_cache_t pointer) in the kernel memory allocator, each annotated with its containing function.
1179 kmem_cache_t *cp; in kmem_cache_applyall() local
1182 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_cache_applyall()
1183 cp = list_next(&kmem_caches, cp)) in kmem_cache_applyall()
1185 (void) taskq_dispatch(tq, (task_func_t *)func, cp, in kmem_cache_applyall()
1188 func(cp); in kmem_cache_applyall()
1195 kmem_cache_t *cp; in kmem_cache_applyall_id() local
1198 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_cache_applyall_id()
1199 cp = list_next(&kmem_caches, cp)) { in kmem_cache_applyall_id()
1200 if (!(cp->cache_cflags & KMC_IDENTIFIER)) in kmem_cache_applyall_id()
1203 (void) taskq_dispatch(tq, (task_func_t *)func, cp, in kmem_cache_applyall_id()
1206 func(cp); in kmem_cache_applyall_id()
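
kmem_cache_applyall() walks the global kmem_caches list and applies func to every cache, dispatching each call to a taskq when one is supplied and calling func(cp) directly otherwise; kmem_cache_applyall_id() is identical but skips caches without the KMC_IDENTIFIER cflag. A minimal userland sketch of that walk, assuming a plain singly linked list in place of the kernel's list_t (all names here are illustrative, not the kernel API):

#include <stdio.h>

/* Toy stand-in for the kernel's list of kmem_cache_t structures. */
struct cache {
    const char   *name;
    int           is_identifier;   /* models the KMC_IDENTIFIER cflag */
    struct cache *next;
};

typedef void (*cache_func_t)(struct cache *);

/* Apply func to every cache; a taskq dispatch is modeled as a direct
 * call here (the kernel also falls back to func(cp) when tq == NULL). */
static void
cache_applyall(struct cache *head, cache_func_t func, int id_only)
{
    for (struct cache *cp = head; cp != NULL; cp = cp->next) {
        if (id_only && !cp->is_identifier)
            continue;       /* the _id() variant skips these */
        func(cp);
    }
}

static void
print_cache(struct cache *cp)
{
    printf("visiting %s\n", cp->name);
}

int
main(void)
{
    struct cache b = { "pid_cache", 1, NULL };
    struct cache a = { "kmem_alloc_8", 0, &b };

    cache_applyall(&a, print_cache, 0);   /* visits both */
    cache_applyall(&a, print_cache, 1);   /* visits only pid_cache */
    return (0);
}
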
1215 kmem_findslab(kmem_cache_t *cp, void *buf) in kmem_findslab() argument
1219 mutex_enter(&cp->cache_lock); in kmem_findslab()
1220 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL; in kmem_findslab()
1221 sp = list_next(&cp->cache_complete_slabs, sp)) { in kmem_findslab()
1223 mutex_exit(&cp->cache_lock); in kmem_findslab()
1227 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL; in kmem_findslab()
1228 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) { in kmem_findslab()
1230 mutex_exit(&cp->cache_lock); in kmem_findslab()
1234 mutex_exit(&cp->cache_lock); in kmem_findslab()
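
kmem_findslab() holds cache_lock while it scans the complete-slab list and then the partial-slab AVL tree for the slab whose address range contains buf. A sketch of the membership test, assuming a slab covers [slab_base, slab_base + chunks * chunksize) as KMEM_SLAB_MEMBER() implies:

#include <stddef.h>
#include <stdint.h>

struct slab {
    char   *slab_base;      /* first byte covered by the slab */
    size_t  slab_chunks;    /* number of chunks in the slab */
    size_t  chunksize;      /* bytes per chunk */
};

/* Illustrative KMEM_SLAB_MEMBER(): does buf fall inside sp's range? */
static int
slab_member(const struct slab *sp, const void *buf)
{
    uintptr_t base = (uintptr_t)sp->slab_base;
    uintptr_t end  = base + sp->slab_chunks * sp->chunksize;

    return ((uintptr_t)buf >= base && (uintptr_t)buf < end);
}

/* Linear stand-in for the complete-list + partial-AVL walk. */
static struct slab *
findslab(struct slab *slabs, size_t nslabs, const void *buf)
{
    for (size_t i = 0; i < nslabs; i++)
        if (slab_member(&slabs[i], buf))
            return (&slabs[i]);
    return (NULL);          /* caller reports KMERR_BADADDR */
}

int
main(void)
{
    static char arena[4096];
    struct slab sp = { arena, 8, 512 };

    return (findslab(&sp, 1, arena + 1000) == &sp ? 0 : 1);
}
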
1244 kmem_cache_t *cp = cparg; in kmem_error() local
1253 sp = kmem_findslab(cp, buf); in kmem_error()
1255 for (cp = list_tail(&kmem_caches); cp != NULL; in kmem_error()
1256 cp = list_prev(&kmem_caches, cp)) { in kmem_error()
1257 if ((sp = kmem_findslab(cp, buf)) != NULL) in kmem_error()
1263 cp = NULL; in kmem_error()
1266 if (cp != cparg) in kmem_error()
1270 (uintptr_t)sp->slab_base) % cp->cache_chunksize; in kmem_error()
1273 if (cp->cache_flags & KMF_BUFTAG) in kmem_error()
1274 btp = KMEM_BUFTAG(cp, buf); in kmem_error()
1275 if (cp->cache_flags & KMF_HASH) { in kmem_error()
1276 mutex_enter(&cp->cache_lock); in kmem_error()
1277 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) in kmem_error()
1280 mutex_exit(&cp->cache_lock); in kmem_error()
1283 if (kmem_findslab(cp->cache_bufctl_cache, bcp) == in kmem_error()
1296 kmem_panic_info.kmp_realcache = cp; in kmem_error()
1306 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_error()
1340 printf("buffer was allocated from %s,\n", cp->cache_name); in kmem_error()
1359 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) && in kmem_error()
1369 (void *)sp, cp->cache_name); in kmem_error()
1459 #define KMEM_AUDIT(lp, cp, bcp) \ argument
1469 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp, in kmem_log_event() argument
1477 bca.bc_cache = cp; in kmem_log_event()
1478 KMEM_AUDIT(lp, cp, &bca); in kmem_log_event()
1485 kmem_slab_create(kmem_cache_t *cp, int kmflag) in kmem_slab_create() argument
1487 size_t slabsize = cp->cache_slabsize; in kmem_slab_create()
1488 size_t chunksize = cp->cache_chunksize; in kmem_slab_create()
1489 int cache_flags = cp->cache_flags; in kmem_slab_create()
1494 vmem_t *vmp = cp->cache_arena; in kmem_slab_create()
1496 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_slab_create()
1498 color = cp->cache_color + cp->cache_align; in kmem_slab_create()
1499 if (color > cp->cache_maxcolor) in kmem_slab_create()
1500 color = cp->cache_mincolor; in kmem_slab_create()
1501 cp->cache_color = color; in kmem_slab_create()
1516 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH)); in kmem_slab_create()
1517 if (!(cp->cache_cflags & KMC_NOTOUCH)) in kmem_slab_create()
1525 sp = KMEM_SLAB(cp, slab); in kmem_slab_create()
1529 sp->slab_cache = cp; in kmem_slab_create()
1541 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag); in kmem_slab_create()
1548 bcap->bc_cache = cp; in kmem_slab_create()
1553 bcp = KMEM_BUFCTL(cp, buf); in kmem_slab_create()
1556 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_slab_create()
1562 cp->cache_verify); in kmem_slab_create()
1570 kmem_log_event(kmem_slab_log, cp, sp, slab); in kmem_slab_create()
1578 kmem_cache_free(cp->cache_bufctl_cache, bcp); in kmem_slab_create()
1588 kmem_log_event(kmem_failure_log, cp, NULL, NULL); in kmem_slab_create()
1589 atomic_inc_64(&cp->cache_alloc_fail); in kmem_slab_create()
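
kmem_slab_create() staggers each new slab's starting offset (its "color") by cache_align, wrapping past cache_maxcolor back to cache_mincolor, so that the same buffer index in different slabs does not always land on the same cache lines. The rotation alone, lifted from lines 1498-1501 into a standalone sketch:

#include <stdio.h>
#include <stddef.h>

struct cache {
    size_t cache_color;     /* color given to the most recent slab */
    size_t cache_align;     /* buffer alignment */
    size_t cache_mincolor;
    size_t cache_maxcolor;
};

/* Advance and return the slab color, exactly as the listing shows:
 * color += align; wrap to mincolor once maxcolor is exceeded. */
static size_t
next_slab_color(struct cache *cp)
{
    size_t color = cp->cache_color + cp->cache_align;

    if (color > cp->cache_maxcolor)
        color = cp->cache_mincolor;
    cp->cache_color = color;
    return (color);
}

int
main(void)
{
    struct cache c = { 0, 64, 0, 192 };

    for (int i = 0; i < 6; i++)         /* prints: 64 128 192 0 64 128 */
        printf("%zu ", next_slab_color(&c));
    printf("\n");
    return (0);
}
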
1598 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp) in kmem_slab_destroy() argument
1600 vmem_t *vmp = cp->cache_arena; in kmem_slab_destroy()
1603 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_slab_destroy()
1606 if (cp->cache_flags & KMF_HASH) { in kmem_slab_destroy()
1610 kmem_cache_free(cp->cache_bufctl_cache, bcp); in kmem_slab_destroy()
1614 vmem_free(vmp, slab, cp->cache_slabsize); in kmem_slab_destroy()
1618 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill) in kmem_slab_alloc_impl() argument
1624 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_alloc_impl()
1631 (sp == avl_first(&cp->cache_partial_slabs)))); in kmem_slab_alloc_impl()
1632 ASSERT(sp->slab_cache == cp); in kmem_slab_alloc_impl()
1634 cp->cache_slab_alloc++; in kmem_slab_alloc_impl()
1635 cp->cache_bufslab--; in kmem_slab_alloc_impl()
1641 if (cp->cache_flags & KMF_HASH) { in kmem_slab_alloc_impl()
1646 hash_bucket = KMEM_HASH(cp, buf); in kmem_slab_alloc_impl()
1649 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { in kmem_slab_alloc_impl()
1650 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_slab_alloc_impl()
1653 buf = KMEM_BUF(cp, bcp); in kmem_slab_alloc_impl()
1664 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_alloc_impl()
1669 list_insert_head(&cp->cache_complete_slabs, sp); in kmem_slab_alloc_impl()
1670 cp->cache_complete_slab_count++; in kmem_slab_alloc_impl()
1680 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) && in kmem_slab_alloc_impl()
1681 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) { in kmem_slab_alloc_impl()
1682 kmem_slab_prefill(cp, sp); in kmem_slab_alloc_impl()
1687 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_alloc_impl()
1695 ASSERT(!avl_update(&cp->cache_partial_slabs, sp)); in kmem_slab_alloc_impl()
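
kmem_slab_alloc_impl() pops one bufctl off the slab freelist, bumps slab_refcnt, and, when the slab becomes fully allocated, migrates it from the partial-slab AVL tree to the head of the complete-slab list. A sketch of that pop-and-migrate decision with a plain freelist (AVL ordering and the KMF_HASH path are elided):

#include <stdio.h>
#include <stddef.h>

struct bufctl {
    struct bufctl *bc_next;     /* freelist linkage */
};

struct slab {
    struct bufctl *slab_head;   /* freelist of unallocated chunks */
    size_t         slab_refcnt; /* chunks currently allocated */
    size_t         slab_chunks; /* total chunks in the slab */
};

/* Pop one buffer; report whether the slab just became complete, at
 * which point the caller moves it to the complete-slab list. */
static struct bufctl *
slab_alloc_one(struct slab *sp, int *now_complete)
{
    struct bufctl *bcp = sp->slab_head;

    if (bcp == NULL)
        return (NULL);
    sp->slab_head = bcp->bc_next;
    sp->slab_refcnt++;
    *now_complete = (sp->slab_refcnt == sp->slab_chunks);
    return (bcp);
}

int
main(void)
{
    struct bufctl b2 = { NULL }, b1 = { &b2 };
    struct slab sp = { &b1, 0, 2 };
    int complete;

    while (slab_alloc_one(&sp, &complete) != NULL)
        printf("refcnt=%zu complete=%d\n", sp.slab_refcnt, complete);
    return (0);
}
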
1703 kmem_slab_alloc(kmem_cache_t *cp, int kmflag) in kmem_slab_alloc() argument
1709 mutex_enter(&cp->cache_lock); in kmem_slab_alloc()
1710 test_destructor = (cp->cache_slab_alloc == 0); in kmem_slab_alloc()
1711 sp = avl_first(&cp->cache_partial_slabs); in kmem_slab_alloc()
1713 ASSERT(cp->cache_bufslab == 0); in kmem_slab_alloc()
1718 mutex_exit(&cp->cache_lock); in kmem_slab_alloc()
1719 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) { in kmem_slab_alloc()
1722 mutex_enter(&cp->cache_lock); in kmem_slab_alloc()
1723 cp->cache_slab_create++; in kmem_slab_alloc()
1724 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) in kmem_slab_alloc()
1725 cp->cache_bufmax = cp->cache_buftotal; in kmem_slab_alloc()
1726 cp->cache_bufslab += sp->slab_chunks; in kmem_slab_alloc()
1729 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE); in kmem_slab_alloc()
1730 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == in kmem_slab_alloc()
1731 (cp->cache_complete_slab_count + in kmem_slab_alloc()
1732 avl_numnodes(&cp->cache_partial_slabs) + in kmem_slab_alloc()
1733 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); in kmem_slab_alloc()
1734 mutex_exit(&cp->cache_lock); in kmem_slab_alloc()
1736 if (test_destructor && cp->cache_destructor != NULL) { in kmem_slab_alloc()
1742 if ((cp->cache_constructor == NULL) || in kmem_slab_alloc()
1743 cp->cache_constructor(buf, cp->cache_private, in kmem_slab_alloc()
1745 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_alloc()
1748 cp->cache_bufsize); in kmem_slab_alloc()
1749 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_slab_alloc()
1750 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_slab_alloc()
1763 kmem_slab_free(kmem_cache_t *cp, void *buf) in kmem_slab_free() argument
1770 mutex_enter(&cp->cache_lock); in kmem_slab_free()
1771 cp->cache_slab_free++; in kmem_slab_free()
1773 if (cp->cache_flags & KMF_HASH) { in kmem_slab_free()
1777 prev_bcpp = KMEM_HASH(cp, buf); in kmem_slab_free()
1784 cp->cache_lookup_depth++; in kmem_slab_free()
1788 bcp = KMEM_BUFCTL(cp, buf); in kmem_slab_free()
1789 sp = KMEM_SLAB(cp, buf); in kmem_slab_free()
1792 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) { in kmem_slab_free()
1793 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1794 kmem_error(KMERR_BADADDR, cp, buf); in kmem_slab_free()
1806 kmem_slab_move_yes(cp, sp, buf); in kmem_slab_free()
1809 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { in kmem_slab_free()
1810 if (cp->cache_flags & KMF_CONTENTS) in kmem_slab_free()
1813 cp->cache_contents); in kmem_slab_free()
1814 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_slab_free()
1820 cp->cache_bufslab++; in kmem_slab_free()
1829 list_remove(&cp->cache_complete_slabs, sp); in kmem_slab_free()
1830 cp->cache_complete_slab_count--; in kmem_slab_free()
1832 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1835 cp->cache_buftotal -= sp->slab_chunks; in kmem_slab_free()
1836 cp->cache_bufslab -= sp->slab_chunks; in kmem_slab_free()
1848 if (cp->cache_defrag == NULL || in kmem_slab_free()
1849 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) && in kmem_slab_free()
1851 cp->cache_slab_destroy++; in kmem_slab_free()
1852 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1853 kmem_slab_destroy(cp, sp); in kmem_slab_free()
1855 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_slab_free()
1869 cp->cache_defrag->kmd_deadcount++; in kmem_slab_free()
1870 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1879 list_remove(&cp->cache_complete_slabs, sp); in kmem_slab_free()
1880 cp->cache_complete_slab_count--; in kmem_slab_free()
1881 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1883 (void) avl_update_gt(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1886 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == in kmem_slab_free()
1887 (cp->cache_complete_slab_count + in kmem_slab_free()
1888 avl_numnodes(&cp->cache_partial_slabs) + in kmem_slab_free()
1889 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); in kmem_slab_free()
1890 mutex_exit(&cp->cache_lock); in kmem_slab_free()
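
kmem_slab_free() is the mirror image: the buffer rejoins the slab freelist, a previously complete slab moves back to the partial tree, and a slab whose last buffer returns is destroyed or, when the cache is being defragmented, parked on the kmd_deadlist. A sketch of just the transition decision:

#include <stdio.h>
#include <stddef.h>

enum free_action { FREE_NONE, FREE_TO_PARTIAL, FREE_DESTROY };

struct slab {
    size_t slab_refcnt;     /* chunks currently allocated */
    size_t slab_chunks;     /* total chunks */
};

/* One buffer returns to sp: decide the list migration the kernel
 * performs under cache_lock.  A slab that was complete moves to the
 * partial tree; a slab whose last buffer came back is destroyed (or
 * parked on the defrag dead list while moves are pending). */
static enum free_action
slab_free_one(struct slab *sp)
{
    int was_complete = (sp->slab_refcnt == sp->slab_chunks);

    sp->slab_refcnt--;
    if (sp->slab_refcnt == 0)
        return (FREE_DESTROY);
    return (was_complete ? FREE_TO_PARTIAL : FREE_NONE);
}

int
main(void)
{
    struct slab sp = { 2, 2 };  /* fully allocated, two chunks */

    printf("%d\n", slab_free_one(&sp));   /* 1: complete -> partial */
    printf("%d\n", slab_free_one(&sp));   /* 2: destroy */
    return (0);
}
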
1897 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct, in kmem_cache_alloc_debug() argument
1900 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_cache_alloc_debug()
1905 kmem_error(KMERR_BADBUFTAG, cp, buf); in kmem_cache_alloc_debug()
1911 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { in kmem_cache_alloc_debug()
1912 kmem_error(KMERR_BADBUFCTL, cp, buf); in kmem_cache_alloc_debug()
1916 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_alloc_debug()
1917 if (!construct && (cp->cache_flags & KMF_LITE)) { in kmem_cache_alloc_debug()
1919 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_cache_alloc_debug()
1922 if (cp->cache_constructor != NULL) in kmem_cache_alloc_debug()
1930 cp->cache_verify)) { in kmem_cache_alloc_debug()
1931 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_cache_alloc_debug()
1938 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 && in kmem_cache_alloc_debug()
1941 kmem_log_event(kmem_failure_log, cp, NULL, NULL); in kmem_cache_alloc_debug()
1942 if (!construct && cp->cache_destructor != NULL) in kmem_cache_alloc_debug()
1943 cp->cache_destructor(buf, cp->cache_private); in kmem_cache_alloc_debug()
1948 if (mtbf || (construct && cp->cache_constructor != NULL && in kmem_cache_alloc_debug()
1949 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) { in kmem_cache_alloc_debug()
1950 atomic_inc_64(&cp->cache_alloc_fail); in kmem_cache_alloc_debug()
1952 if (cp->cache_flags & KMF_DEADBEEF) in kmem_cache_alloc_debug()
1953 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_cache_alloc_debug()
1954 kmem_slab_free(cp, buf); in kmem_cache_alloc_debug()
1958 if (cp->cache_flags & KMF_AUDIT) { in kmem_cache_alloc_debug()
1959 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_cache_alloc_debug()
1962 if ((cp->cache_flags & KMF_LITE) && in kmem_cache_alloc_debug()
1963 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { in kmem_cache_alloc_debug()
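
kmem_cache_alloc_debug() validates the buftag state word and, under KMF_DEADBEEF, confirms the free pattern is still intact before the buffer is handed out; any damage is reported through kmem_error() as KMERR_MODIFIED. A sketch of copy_pattern()/verify_pattern()-style checking, assuming the familiar 0xdeadbeef fill (the pointer-to-first-bad-word return convention matches the use at line 1306):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Assumed stand-in for KMEM_FREE_PATTERN. */
#define FREE_PATTERN    0xdeadbeefdeadbeefULL

/* Fill a buffer with the pattern (copy_pattern in the listing). */
static void
copy_pattern(uint64_t pat, void *buf, size_t size)
{
    uint64_t *p = buf;

    for (size_t i = 0; i < size / sizeof (uint64_t); i++)
        p[i] = pat;
}

/* Return a pointer to the first 64-bit word that no longer holds the
 * pattern, or NULL if the buffer is intact (verify_pattern's contract). */
static void *
verify_pattern(uint64_t pat, void *buf, size_t size)
{
    uint64_t *p = buf;

    for (size_t i = 0; i < size / sizeof (uint64_t); i++)
        if (p[i] != pat)
            return (&p[i]);
    return (NULL);
}

int
main(void)
{
    uint64_t buf[8];

    copy_pattern(FREE_PATTERN, buf, sizeof (buf));
    buf[3] ^= 1;                /* simulate a modify-after-free */
    void *off = verify_pattern(FREE_PATTERN, buf, sizeof (buf));
    printf("corruption at word %td\n", (uint64_t *)off - buf);
    return (0);
}
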
1971 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller) in kmem_cache_free_debug() argument
1973 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_cache_free_debug()
1979 kmem_error(KMERR_DUPFREE, cp, buf); in kmem_cache_free_debug()
1982 sp = kmem_findslab(cp, buf); in kmem_cache_free_debug()
1983 if (sp == NULL || sp->slab_cache != cp) in kmem_cache_free_debug()
1984 kmem_error(KMERR_BADADDR, cp, buf); in kmem_cache_free_debug()
1986 kmem_error(KMERR_REDZONE, cp, buf); in kmem_cache_free_debug()
1992 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { in kmem_cache_free_debug()
1993 kmem_error(KMERR_BADBUFCTL, cp, buf); in kmem_cache_free_debug()
1998 kmem_error(KMERR_REDZONE, cp, buf); in kmem_cache_free_debug()
2002 if (cp->cache_flags & KMF_AUDIT) { in kmem_cache_free_debug()
2003 if (cp->cache_flags & KMF_CONTENTS) in kmem_cache_free_debug()
2005 buf, cp->cache_contents); in kmem_cache_free_debug()
2006 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_cache_free_debug()
2009 if ((cp->cache_flags & KMF_LITE) && in kmem_cache_free_debug()
2010 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { in kmem_cache_free_debug()
2014 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_free_debug()
2015 if (cp->cache_flags & KMF_LITE) in kmem_cache_free_debug()
2017 else if (cp->cache_destructor != NULL) in kmem_cache_free_debug()
2018 cp->cache_destructor(buf, cp->cache_private); in kmem_cache_free_debug()
2020 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_cache_free_debug()
2030 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds) in kmem_magazine_destroy() argument
2034 ASSERT(!list_link_active(&cp->cache_link) || in kmem_magazine_destroy()
2040 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_magazine_destroy()
2042 cp->cache_verify) != NULL) { in kmem_magazine_destroy()
2043 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_magazine_destroy()
2046 if ((cp->cache_flags & KMF_LITE) && in kmem_magazine_destroy()
2047 cp->cache_destructor != NULL) { in kmem_magazine_destroy()
2048 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_magazine_destroy()
2050 cp->cache_destructor(buf, cp->cache_private); in kmem_magazine_destroy()
2053 } else if (cp->cache_destructor != NULL) { in kmem_magazine_destroy()
2054 cp->cache_destructor(buf, cp->cache_private); in kmem_magazine_destroy()
2057 kmem_slab_free(cp, buf); in kmem_magazine_destroy()
2059 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_magazine_destroy()
2060 kmem_cache_free(cp->cache_magtype->mt_cache, mp); in kmem_magazine_destroy()
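
kmem_magazine_destroy() drains nrounds cached buffers out of a magazine, runs the destructor on each (the KMF_LITE variant redzone-checks via the buftag first), returns them to the slab layer, and finally frees the magazine back to its magtype cache. A userland sketch of the drain loop, with malloc/free standing in for the slab and magazine caches:

#include <stdlib.h>
#include <stdio.h>

typedef void (*destructor_t)(void *buf);

struct magazine {
    int    mag_size;        /* capacity (mirrors mt_magsize) */
    void  *mag_round[8];    /* cached buffers ("rounds") */
};

/* Drain nrounds buffers: destruct each, hand it back to the slab
 * layer (modeled by free()), then release the magazine itself. */
static void
magazine_destroy(struct magazine *mp, int nrounds, destructor_t dtor)
{
    for (int i = 0; i < nrounds; i++) {
        void *buf = mp->mag_round[i];

        if (dtor != NULL)
            dtor(buf);      /* cp->cache_destructor(buf, private) */
        free(buf);          /* kmem_slab_free() stand-in */
    }
    free(mp);               /* kmem_cache_free(magtype cache) stand-in */
}

static void
dtor_report(void *buf)
{
    printf("destructing %p\n", buf);
}

int
main(void)
{
    struct magazine *mp = malloc(sizeof (*mp));

    mp->mag_size = 8;
    mp->mag_round[0] = malloc(32);
    mp->mag_round[1] = malloc(32);
    magazine_destroy(mp, 2, dtor_report);
    return (0);
}
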
2067 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp) in kmem_depot_alloc() argument
2077 if (!mutex_tryenter(&cp->cache_depot_lock)) { in kmem_depot_alloc()
2078 mutex_enter(&cp->cache_depot_lock); in kmem_depot_alloc()
2079 cp->cache_depot_contention++; in kmem_depot_alloc()
2083 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_depot_alloc()
2090 mutex_exit(&cp->cache_depot_lock); in kmem_depot_alloc()
2099 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp) in kmem_depot_free() argument
2101 mutex_enter(&cp->cache_depot_lock); in kmem_depot_free()
2102 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_depot_free()
2106 mutex_exit(&cp->cache_depot_lock); in kmem_depot_free()
2113 kmem_depot_ws_update(kmem_cache_t *cp) in kmem_depot_ws_update() argument
2115 mutex_enter(&cp->cache_depot_lock); in kmem_depot_ws_update()
2116 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; in kmem_depot_ws_update()
2117 cp->cache_full.ml_min = cp->cache_full.ml_total; in kmem_depot_ws_update()
2118 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; in kmem_depot_ws_update()
2119 cp->cache_empty.ml_min = cp->cache_empty.ml_total; in kmem_depot_ws_update()
2120 mutex_exit(&cp->cache_depot_lock); in kmem_depot_ws_update()
2128 kmem_depot_ws_zero(kmem_cache_t *cp) in kmem_depot_ws_zero() argument
2130 mutex_enter(&cp->cache_depot_lock); in kmem_depot_ws_zero()
2131 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total; in kmem_depot_ws_zero()
2132 cp->cache_full.ml_min = cp->cache_full.ml_total; in kmem_depot_ws_zero()
2133 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total; in kmem_depot_ws_zero()
2134 cp->cache_empty.ml_min = cp->cache_empty.ml_total; in kmem_depot_ws_zero()
2135 mutex_exit(&cp->cache_depot_lock); in kmem_depot_ws_zero()
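
kmem_depot_ws_update() maintains the depot working set: once per interval the previous minimum becomes ml_reaplimit and ml_min restarts at ml_total, so only magazines idle for a whole interval are eligible for reaping; kmem_depot_ws_zero() instead marks everything reapable ahead of a purge. Both updates on a toy maglist:

#include <stdio.h>

struct maglist {
    long ml_total;          /* magazines currently on this depot list */
    long ml_min;            /* low-water mark since the last update */
    long ml_reaplimit;      /* magazines the reaper may take */
};

/* Once per interval: last interval's minimum is provably idle. */
static void
depot_ws_update(struct maglist *mlp)
{
    mlp->ml_reaplimit = mlp->ml_min;
    mlp->ml_min = mlp->ml_total;
}

/* Before a full purge: consider every magazine part of the excess. */
static void
depot_ws_zero(struct maglist *mlp)
{
    mlp->ml_reaplimit = mlp->ml_total;
    mlp->ml_min = mlp->ml_total;
}

int
main(void)
{
    struct maglist full = { 10, 4, 0 };

    depot_ws_update(&full);     /* 4 magazines sat idle all interval */
    printf("reapable: %ld\n", full.ml_reaplimit);
    depot_ws_zero(&full);
    printf("reapable: %ld\n", full.ml_reaplimit);
    return (0);
}
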
2149 kmem_depot_ws_reap(kmem_cache_t *cp) in kmem_depot_ws_reap() argument
2155 ASSERT(!list_link_active(&cp->cache_link) || in kmem_depot_ws_reap()
2158 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_depot_ws_reap()
2160 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) { in kmem_depot_ws_reap()
2161 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); in kmem_depot_ws_reap()
2162 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize; in kmem_depot_ws_reap()
2169 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); in kmem_depot_ws_reap()
2171 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) { in kmem_depot_ws_reap()
2172 kmem_magazine_destroy(cp, mp, 0); in kmem_depot_ws_reap()
2173 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize; in kmem_depot_ws_reap()
2212 #define KMEM_DUMPCTL(cp, buf) \ argument
2213 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2266 kmem_cache_t *cp; in kmem_dump_begin() local
2270 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_dump_begin()
2271 cp = list_next(&kmem_caches, cp)) { in kmem_dump_begin()
2272 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_dump_begin()
2274 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) { in kmem_dump_begin()
2275 cp->cache_flags |= KMF_DUMPDIVERT; in kmem_dump_begin()
2281 cp->cache_flags |= KMF_DUMPUNSAFE; in kmem_dump_begin()
2330 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag) in kmem_cache_alloc_dump() argument
2337 if ((buf = cp->cache_dump.kd_freelist) != NULL) { in kmem_cache_alloc_dump()
2338 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next; in kmem_cache_alloc_dump()
2344 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align); in kmem_cache_alloc_dump()
2345 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t); in kmem_cache_alloc_dump()
2348 if (cp->cache_align < PAGESIZE) { in kmem_cache_alloc_dump()
2359 cp->cache_dump.kd_alloc_fails++; in kmem_cache_alloc_dump()
2370 if (cp->cache_constructor != NULL && in kmem_cache_alloc_dump()
2371 cp->cache_constructor(buf, cp->cache_private, kmflag) in kmem_cache_alloc_dump()
2375 cp->cache_name, (void *)cp); in kmem_cache_alloc_dump()
2381 cp->cache_dump.kd_alloc_fails++; in kmem_cache_alloc_dump()
2393 kmem_cache_free_dump(kmem_cache_t *cp, void *buf) in kmem_cache_free_dump() argument
2398 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist; in kmem_cache_free_dump()
2399 cp->cache_dump.kd_freelist = buf; in kmem_cache_free_dump()
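
During a crash dump, caches flagged KMF_DUMPDIVERT bypass the slab layer: kmem_cache_alloc_dump() first pops from a per-cache freelist threaded through KMEM_DUMPCTL trailers (which kmem_cache_free_dump() pushes, lines 2398-2399), then bump-allocates an aligned chunk from the reserved dump area, counting kd_alloc_fails when it runs out. A sketch of the bump path, assuming a power-of-two alignment as the kernel's P2ROUNDUP requires:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Round x up to a power-of-two boundary, as P2ROUNDUP does. */
#define P2ROUNDUP(x, a) (((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

static char  dump_area[4096];           /* stand-in for the dump buffer */
static char *dump_curr = dump_area;
static char *dump_end  = dump_area + sizeof (dump_area);

/* Bump-allocate size bytes at the given (power-of-two) alignment;
 * NULL means the caller falls back and counts kd_alloc_fails. */
static void *
dump_alloc(size_t size, size_t align)
{
    char *buf = (char *)P2ROUNDUP(dump_curr, align);

    if (buf + size > dump_end)
        return (NULL);
    dump_curr = buf + size;
    return (buf);
}

int
main(void)
{
    void *a = dump_alloc(24, 16);
    void *b = dump_alloc(100, 64);

    printf("%p %p\n", a, b);
    return (0);
}
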
2415 kmem_cache_alloc(kmem_cache_t *cp, int kmflag) in kmem_cache_alloc() argument
2417 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_cache_alloc()
2435 cp->cache_dump.kd_unsafe++; in kmem_cache_alloc()
2438 kmem_cache_alloc_debug(cp, buf, kmflag, 0, in kmem_cache_alloc()
2466 cp->cache_dump.kd_unsafe++; in kmem_cache_alloc()
2468 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) != in kmem_cache_alloc()
2486 fmp = kmem_depot_alloc(cp, &cp->cache_full); in kmem_cache_alloc()
2489 kmem_depot_free(cp, &cp->cache_empty, in kmem_cache_alloc()
2507 buf = kmem_slab_alloc(cp, kmflag); in kmem_cache_alloc()
2512 if (cp->cache_flags & KMF_BUFTAG) { in kmem_cache_alloc()
2516 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller()); in kmem_cache_alloc()
2527 return (kmem_cache_alloc(cp, kmflag)); in kmem_cache_alloc()
2532 if (cp->cache_constructor != NULL && in kmem_cache_alloc()
2533 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) { in kmem_cache_alloc()
2534 atomic_inc_64(&cp->cache_alloc_fail); in kmem_cache_alloc()
2535 kmem_slab_free(cp, buf); in kmem_cache_alloc()
2549 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed) in kmem_slab_free_constructed() argument
2551 if (!freed && (cp->cache_flags & KMF_BUFTAG)) in kmem_slab_free_constructed()
2552 if (kmem_cache_free_debug(cp, buf, caller()) == -1) in kmem_slab_free_constructed()
2559 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF && in kmem_slab_free_constructed()
2560 cp->cache_destructor != NULL) { in kmem_slab_free_constructed()
2561 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */ in kmem_slab_free_constructed()
2562 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_slab_free_constructed()
2564 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_free_constructed()
2567 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_free_constructed()
2571 kmem_slab_free(cp, buf); in kmem_slab_free_constructed()
2582 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp) in kmem_cpucache_magazine_alloc() argument
2593 emp = kmem_depot_alloc(cp, &cp->cache_empty); in kmem_cpucache_magazine_alloc()
2596 kmem_depot_free(cp, &cp->cache_full, in kmem_cpucache_magazine_alloc()
2607 mtp = cp->cache_magtype; in kmem_cpucache_magazine_alloc()
2630 kmem_depot_free(cp, &cp->cache_empty, emp); in kmem_cpucache_magazine_alloc()
2645 kmem_cache_free(kmem_cache_t *cp, void *buf) in kmem_cache_free() argument
2647 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_cache_free()
2653 ASSERT(cp->cache_defrag == NULL || in kmem_cache_free()
2654 cp->cache_defrag->kmd_thread != curthread || in kmem_cache_free()
2655 (buf != cp->cache_defrag->kmd_from_buf && in kmem_cache_free()
2656 buf != cp->cache_defrag->kmd_to_buf)); in kmem_cache_free()
2662 cp->cache_dump.kd_unsafe++; in kmem_cache_free()
2663 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) { in kmem_cache_free()
2667 if (kmem_cache_free_debug(cp, buf, caller()) == -1) in kmem_cache_free()
2703 if (!kmem_cpucache_magazine_alloc(ccp, cp)) { in kmem_cache_free()
2713 kmem_slab_free_constructed(cp, buf, B_TRUE); in kmem_cache_free()
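
kmem_cache_free() tries hard to stay on the CPU layer: if the loaded magazine has a free slot the buffer becomes a new round; otherwise the previous magazine is exchanged in, or kmem_cpucache_magazine_alloc() fetches an empty magazine from the depot, and only if all of that fails does the buffer drop to kmem_slab_free_constructed(). A single-threaded sketch of the push-or-swap loop (the real path runs under the per-CPU cc_lock):

#include <stdio.h>

#define MAGSIZE 3

struct cpu_cache {
    void *loaded[MAGSIZE];  /* cc_loaded magazine */
    int   rounds;           /* cc_rounds: filled slots in loaded */
    void *prev[MAGSIZE];    /* cc_ploaded magazine */
    int   prounds;          /* cc_prounds */
};

/* Try to cache buf on the CPU layer; 0 means the caller must fall
 * back to the slab layer (kmem_slab_free_constructed). */
static int
cpu_cache_free(struct cpu_cache *ccp, void *buf)
{
    for (;;) {
        if (ccp->rounds < MAGSIZE) {    /* room in loaded magazine */
            ccp->loaded[ccp->rounds++] = buf;
            return (1);
        }
        if (ccp->prounds < MAGSIZE) {   /* swap in emptier previous */
            void *tmp[MAGSIZE];
            int   t = ccp->rounds;

            for (int i = 0; i < MAGSIZE; i++) tmp[i] = ccp->loaded[i];
            for (int i = 0; i < MAGSIZE; i++) ccp->loaded[i] = ccp->prev[i];
            for (int i = 0; i < MAGSIZE; i++) ccp->prev[i] = tmp[i];
            ccp->rounds = ccp->prounds;
            ccp->prounds = t;
            continue;                   /* retry the push */
        }
        return (0);     /* would need an empty magazine from the depot */
    }
}

int
main(void)
{
    struct cpu_cache cc = { .rounds = 0, .prounds = MAGSIZE };
    int buf[5];

    for (int i = 0; i < 5; i++)
        printf("buf %d cached on cpu: %d\n", i, cpu_cache_free(&cc, &buf[i]));
    return (0);
}
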
2717 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp) in kmem_slab_prefill() argument
2719 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_slab_prefill()
2720 int cache_flags = cp->cache_flags; in kmem_slab_prefill()
2730 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_prefill()
2732 ASSERT(cp->cache_constructor == NULL); in kmem_slab_prefill()
2733 ASSERT(sp->slab_cache == cp); in kmem_slab_prefill()
2736 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL); in kmem_slab_prefill()
2742 cp->cache_bufslab -= nbufs; in kmem_slab_prefill()
2743 cp->cache_slab_alloc += nbufs; in kmem_slab_prefill()
2744 list_insert_head(&cp->cache_complete_slabs, sp); in kmem_slab_prefill()
2745 cp->cache_complete_slab_count++; in kmem_slab_prefill()
2746 mutex_exit(&cp->cache_lock); in kmem_slab_prefill()
2750 void *buf = KMEM_BUF(cp, head); in kmem_slab_prefill()
2784 if (!kmem_cpucache_magazine_alloc(ccp, cp)) in kmem_slab_prefill()
2799 kmem_slab_free(cp, KMEM_BUF(cp, head)); in kmem_slab_prefill()
2806 mutex_enter(&cp->cache_lock); in kmem_slab_prefill()
2816 kmem_cache_t *cp = kmem_alloc_table[index]; in kmem_zalloc() local
2817 buf = kmem_cache_alloc(cp, kmflag); in kmem_zalloc()
2819 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { in kmem_zalloc()
2820 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_zalloc()
2824 if (cp->cache_flags & KMF_LITE) { in kmem_zalloc()
2843 kmem_cache_t *cp; in kmem_alloc() local
2847 cp = kmem_alloc_table[index]; in kmem_alloc()
2852 cp = kmem_big_alloc_table[index]; in kmem_alloc()
2873 buf = kmem_cache_alloc(cp, kmflag); in kmem_alloc()
2874 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) { in kmem_alloc()
2875 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_alloc()
2879 if (cp->cache_flags & KMF_LITE) { in kmem_alloc()
2890 kmem_cache_t *cp; in kmem_free() local
2893 cp = kmem_alloc_table[index]; in kmem_free()
2898 cp = kmem_big_alloc_table[index]; in kmem_free()
2909 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { in kmem_free()
2910 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_free()
2914 kmem_error(KMERR_DUPFREE, cp, buf); in kmem_free()
2919 kmem_error(KMERR_BADSIZE, cp, buf); in kmem_free()
2921 kmem_error(KMERR_REDZONE, cp, buf); in kmem_free()
2926 kmem_error(KMERR_REDZONE, cp, buf); in kmem_free()
2930 if (cp->cache_flags & KMF_LITE) { in kmem_free()
2935 kmem_cache_free(cp, buf); in kmem_free()
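
kmem_alloc(), kmem_zalloc(), and kmem_free() map a request size to a fixed-size cache by table lookup, indexing kmem_alloc_table (or kmem_big_alloc_table for large sizes) with (size - 1) >> shift, the same shift kmem_alloc_caches_create() uses to populate the slots at line 4134. A sketch with an assumed 8-byte granularity and illustrative cache sizes:

#include <stdio.h>
#include <stddef.h>

#define ALIGN_SHIFT 3               /* 8-byte buckets, an assumption */
#define MAXBUF      128             /* largest size served by the table */
#define TABLE_SLOTS (MAXBUF >> ALIGN_SHIFT)

static size_t alloc_table[TABLE_SLOTS];     /* slot -> cache bufsize */

/* Populate slots the way kmem_alloc_caches_create() does: every slot
 * up to each cache's size points at the cache of that size. */
static void
caches_create(void)
{
    size_t cache_sizes[] = { 8, 16, 24, 32, 48, 64, 96, 128 };
    size_t size = 8;

    for (size_t i = 0; i < sizeof (cache_sizes) / sizeof (size_t); i++) {
        while (size <= cache_sizes[i]) {
            alloc_table[(size - 1) >> ALIGN_SHIFT] = cache_sizes[i];
            size += 8;
        }
    }
}

int
main(void)
{
    caches_create();
    /* a 20-byte request is served by the 24-byte cache */
    printf("20 -> %zu\n", alloc_table[(20 - 1) >> ALIGN_SHIFT]);
    printf("65 -> %zu\n", alloc_table[(65 - 1) >> ALIGN_SHIFT]);
    return (0);
}
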
3008 kmem_cache_reap(kmem_cache_t *cp) in kmem_cache_reap() argument
3011 cp->cache_reap++; in kmem_cache_reap()
3020 if (cp->cache_reclaim != NULL) { in kmem_cache_reap()
3027 delta = cp->cache_full.ml_total; in kmem_cache_reap()
3028 cp->cache_reclaim(cp->cache_private); in kmem_cache_reap()
3029 delta = cp->cache_full.ml_total - delta; in kmem_cache_reap()
3031 mutex_enter(&cp->cache_depot_lock); in kmem_cache_reap()
3032 cp->cache_full.ml_reaplimit += delta; in kmem_cache_reap()
3033 cp->cache_full.ml_min += delta; in kmem_cache_reap()
3034 mutex_exit(&cp->cache_depot_lock); in kmem_cache_reap()
3038 kmem_depot_ws_reap(cp); in kmem_cache_reap()
3040 if (cp->cache_defrag != NULL && !kmem_move_noreap) { in kmem_cache_reap()
3041 kmem_cache_defrag(cp); in kmem_cache_reap()
3138 kmem_cache_magazine_purge(kmem_cache_t *cp) in kmem_cache_magazine_purge() argument
3144 ASSERT(!list_link_active(&cp->cache_link) || in kmem_cache_magazine_purge()
3146 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_cache_magazine_purge()
3149 ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_magazine_purge()
3164 kmem_magazine_destroy(cp, mp, rounds); in kmem_cache_magazine_purge()
3166 kmem_magazine_destroy(cp, pmp, prounds); in kmem_cache_magazine_purge()
3169 kmem_depot_ws_zero(cp); in kmem_cache_magazine_purge()
3170 kmem_depot_ws_reap(cp); in kmem_cache_magazine_purge()
3177 kmem_cache_magazine_enable(kmem_cache_t *cp) in kmem_cache_magazine_enable() argument
3181 if (cp->cache_flags & KMF_NOMAGAZINE) in kmem_cache_magazine_enable()
3185 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_magazine_enable()
3187 ccp->cc_magsize = cp->cache_magtype->mt_magsize; in kmem_cache_magazine_enable()
3197 kmem_cache_reap_now(kmem_cache_t *cp) in kmem_cache_reap_now() argument
3199 ASSERT(list_link_active(&cp->cache_link)); in kmem_cache_reap_now()
3201 kmem_depot_ws_zero(cp); in kmem_cache_reap_now()
3204 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP); in kmem_cache_reap_now()
3220 kmem_cache_magazine_resize(kmem_cache_t *cp) in kmem_cache_magazine_resize() argument
3222 kmem_magtype_t *mtp = cp->cache_magtype; in kmem_cache_magazine_resize()
3226 if (cp->cache_chunksize < mtp->mt_maxbuf) { in kmem_cache_magazine_resize()
3227 kmem_cache_magazine_purge(cp); in kmem_cache_magazine_resize()
3228 mutex_enter(&cp->cache_depot_lock); in kmem_cache_magazine_resize()
3229 cp->cache_magtype = ++mtp; in kmem_cache_magazine_resize()
3230 cp->cache_depot_contention_prev = in kmem_cache_magazine_resize()
3231 cp->cache_depot_contention + INT_MAX; in kmem_cache_magazine_resize()
3232 mutex_exit(&cp->cache_depot_lock); in kmem_cache_magazine_resize()
3233 kmem_cache_magazine_enable(cp); in kmem_cache_magazine_resize()
3242 kmem_hash_rescale(kmem_cache_t *cp) in kmem_hash_rescale() argument
3250 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); in kmem_hash_rescale()
3251 old_size = cp->cache_hash_mask + 1; in kmem_hash_rescale()
3262 mutex_enter(&cp->cache_lock); in kmem_hash_rescale()
3264 old_size = cp->cache_hash_mask + 1; in kmem_hash_rescale()
3265 old_table = cp->cache_hash_table; in kmem_hash_rescale()
3267 cp->cache_hash_mask = new_size - 1; in kmem_hash_rescale()
3268 cp->cache_hash_table = new_table; in kmem_hash_rescale()
3269 cp->cache_rescale++; in kmem_hash_rescale()
3276 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr); in kmem_hash_rescale()
3283 mutex_exit(&cp->cache_lock); in kmem_hash_rescale()
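
kmem_hash_rescale() sizes a new power-of-two table from cache_buftotal (line 3250), then, under cache_lock, rehashes every bufctl chain into the new buckets and frees the old table. A sketch of the rehash loop, with an address-based bucket function standing in for KMEM_HASH():

#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct bufctl {
    struct bufctl *bc_next;
    void          *bc_addr;     /* the buffer this bufctl tracks */
};

/* Illustrative KMEM_HASH(): bucket chosen from the buffer address. */
static struct bufctl **
hash_bucket(struct bufctl **table, size_t mask, void *addr)
{
    return (&table[((uintptr_t)addr >> 3) & mask]);
}

/* Move every chain from the old table into a freshly sized one. */
static struct bufctl **
hash_rescale(struct bufctl **old, size_t old_size, size_t new_size)
{
    struct bufctl **new = calloc(new_size, sizeof (*new));

    if (new == NULL)
        return (old);           /* the kernel also bails on alloc failure */
    for (size_t h = 0; h < old_size; h++) {
        struct bufctl *bcp = old[h];

        while (bcp != NULL) {
            struct bufctl *next = bcp->bc_next;
            struct bufctl **bucket =
                hash_bucket(new, new_size - 1, bcp->bc_addr);

            bcp->bc_next = *bucket;     /* push onto the new chain */
            *bucket = bcp;
            bcp = next;
        }
    }
    free(old);
    return (new);
}

int
main(void)
{
    struct bufctl **t = calloc(4, sizeof (*t));
    struct bufctl bc = { NULL, (void *)0x1000 };

    *hash_bucket(t, 3, bc.bc_addr) = &bc;
    t = hash_rescale(t, 4, 16);
    printf("rehashed: %d\n", *hash_bucket(t, 15, bc.bc_addr) == &bc);
    free(t);
    return (0);
}
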
3293 kmem_cache_update(kmem_cache_t *cp) in kmem_cache_update() argument
3304 mutex_enter(&cp->cache_lock); in kmem_cache_update()
3306 if ((cp->cache_flags & KMF_HASH) && in kmem_cache_update()
3307 (cp->cache_buftotal > (cp->cache_hash_mask << 1) || in kmem_cache_update()
3308 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && in kmem_cache_update()
3309 cp->cache_hash_mask > KMEM_HASH_INITIAL))) in kmem_cache_update()
3312 mutex_exit(&cp->cache_lock); in kmem_cache_update()
3317 kmem_depot_ws_update(cp); in kmem_cache_update()
3323 mutex_enter(&cp->cache_depot_lock); in kmem_cache_update()
3325 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && in kmem_cache_update()
3326 (int)(cp->cache_depot_contention - in kmem_cache_update()
3327 cp->cache_depot_contention_prev) > kmem_depot_contention) in kmem_cache_update()
3330 cp->cache_depot_contention_prev = cp->cache_depot_contention; in kmem_cache_update()
3332 mutex_exit(&cp->cache_depot_lock); in kmem_cache_update()
3336 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP); in kmem_cache_update()
3340 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP); in kmem_cache_update()
3342 if (cp->cache_defrag != NULL) in kmem_cache_update()
3344 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP); in kmem_cache_update()
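
Once per update interval, kmem_cache_update() decides per cache whether to rescale the hash table (buftotal drifted outside roughly [mask/2, mask*2]), grow the magazine size (depot-lock contention since the previous interval exceeded kmem_depot_contention), or start a defrag scan, dispatching each action to a taskq. A sketch of the two threshold tests (the 63 below assumes a KMEM_HASH_INITIAL of 64, and the contention limit is illustrative):

#include <stdio.h>

/* Illustrative stand-in for the kmem_depot_contention tunable. */
static int depot_contention_threshold = 3;

/* The hash table should hold roughly one buffer per bucket. */
static int
needs_hash_rescale(unsigned long buftotal, unsigned long hash_mask)
{
    return (buftotal > (hash_mask << 1) ||
        (buftotal < (hash_mask >> 1) && hash_mask > 63));
}

/* Grow magazines when the depot lock was contended too often during
 * the last interval (the int cast keeps the delta wrap-safe, as the
 * comparison at lines 3326-3327 does). */
static int
needs_magazine_resize(int contention, int contention_prev)
{
    return ((int)(contention - contention_prev) >
        depot_contention_threshold);
}

int
main(void)
{
    printf("rescale: %d\n", needs_hash_rescale(1000, 63));
    printf("resize:  %d\n", needs_magazine_resize(10, 2));
    return (0);
}
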
3373 kmem_cache_t *cp = ksp->ks_private; in kmem_cache_kstat_update() local
3384 mutex_enter(&cp->cache_lock); in kmem_cache_kstat_update()
3386 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail; in kmem_cache_kstat_update()
3387 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc; in kmem_cache_kstat_update()
3388 kmcp->kmc_free.value.ui64 = cp->cache_slab_free; in kmem_cache_kstat_update()
3389 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc; in kmem_cache_kstat_update()
3390 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free; in kmem_cache_kstat_update()
3393 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_kstat_update()
3410 mutex_enter(&cp->cache_depot_lock); in kmem_cache_kstat_update()
3412 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc; in kmem_cache_kstat_update()
3413 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc; in kmem_cache_kstat_update()
3414 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention; in kmem_cache_kstat_update()
3415 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total; in kmem_cache_kstat_update()
3416 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total; in kmem_cache_kstat_update()
3418 (cp->cache_flags & KMF_NOMAGAZINE) ? in kmem_cache_kstat_update()
3419 0 : cp->cache_magtype->mt_magsize; in kmem_cache_kstat_update()
3421 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc; in kmem_cache_kstat_update()
3422 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc; in kmem_cache_kstat_update()
3423 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize; in kmem_cache_kstat_update()
3425 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_cache_kstat_update()
3426 reap = MIN(reap, cp->cache_full.ml_total); in kmem_cache_kstat_update()
3428 mutex_exit(&cp->cache_depot_lock); in kmem_cache_kstat_update()
3430 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize; in kmem_cache_kstat_update()
3431 kmcp->kmc_align.value.ui64 = cp->cache_align; in kmem_cache_kstat_update()
3432 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize; in kmem_cache_kstat_update()
3433 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize; in kmem_cache_kstat_update()
3435 buf_avail += cp->cache_bufslab; in kmem_cache_kstat_update()
3437 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail; in kmem_cache_kstat_update()
3438 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal; in kmem_cache_kstat_update()
3439 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax; in kmem_cache_kstat_update()
3440 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create; in kmem_cache_kstat_update()
3441 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy; in kmem_cache_kstat_update()
3442 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ? in kmem_cache_kstat_update()
3443 cp->cache_hash_mask + 1 : 0; in kmem_cache_kstat_update()
3444 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth; in kmem_cache_kstat_update()
3445 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale; in kmem_cache_kstat_update()
3446 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id; in kmem_cache_kstat_update()
3447 kmcp->kmc_reap.value.ui64 = cp->cache_reap; in kmem_cache_kstat_update()
3449 if (cp->cache_defrag == NULL) { in kmem_cache_kstat_update()
3464 kmem_defrag_t *kd = cp->cache_defrag; in kmem_cache_kstat_update()
3476 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1); in kmem_cache_kstat_update()
3478 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize); in kmem_cache_kstat_update()
3482 mutex_exit(&cp->cache_lock); in kmem_cache_kstat_update()
3492 kmem_cache_stat(kmem_cache_t *cp, char *name) in kmem_cache_stat() argument
3495 kstat_t *ksp = cp->cache_kstat; in kmem_cache_stat()
3582 const kmem_cache_t *cp; in kmem_partial_slab_cmp() local
3591 cp = s1->slab_cache; in kmem_partial_slab_cmp()
3592 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_partial_slab_cmp()
3593 binshift = cp->cache_partial_binshift; in kmem_partial_slab_cmp()
3598 w0 -= cp->cache_maxchunks; in kmem_partial_slab_cmp()
3604 w1 -= cp->cache_maxchunks; in kmem_partial_slab_cmp()
3640 kmem_cache_t *cp; in kmem_cache_create() local
3672 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE, in kmem_cache_create()
3674 bzero(cp, csize); in kmem_cache_create()
3675 list_link_init(&cp->cache_link); in kmem_cache_create()
3695 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG; in kmem_cache_create()
3703 if (cp->cache_flags & KMF_LITE) { in kmem_cache_create()
3707 cp->cache_flags |= KMF_BUFTAG; in kmem_cache_create()
3708 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); in kmem_cache_create()
3710 cp->cache_flags &= ~KMF_DEBUG; in kmem_cache_create()
3714 if (cp->cache_flags & KMF_DEADBEEF) in kmem_cache_create()
3715 cp->cache_flags |= KMF_REDZONE; in kmem_cache_create()
3717 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT)) in kmem_cache_create()
3718 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3721 cp->cache_flags &= ~KMF_DEBUG; in kmem_cache_create()
3724 cp->cache_flags &= ~KMF_TOUCH; in kmem_cache_create()
3727 cp->cache_flags |= KMF_PREFILL; in kmem_cache_create()
3730 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); in kmem_cache_create()
3733 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3735 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH)) in kmem_cache_create()
3736 cp->cache_flags |= KMF_REDZONE; in kmem_cache_create()
3738 if (!(cp->cache_flags & KMF_AUDIT)) in kmem_cache_create()
3739 cp->cache_flags &= ~KMF_CONTENTS; in kmem_cache_create()
3741 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall && in kmem_cache_create()
3742 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH)) in kmem_cache_create()
3743 cp->cache_flags |= KMF_FIREWALL; in kmem_cache_create()
3746 cp->cache_flags &= ~KMF_FIREWALL; in kmem_cache_create()
3748 if (cp->cache_flags & KMF_FIREWALL) { in kmem_cache_create()
3749 cp->cache_flags &= ~KMF_BUFTAG; in kmem_cache_create()
3750 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3758 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN); in kmem_cache_create()
3759 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1); in kmem_cache_create()
3760 cp->cache_bufsize = bufsize; in kmem_cache_create()
3761 cp->cache_align = align; in kmem_cache_create()
3762 cp->cache_constructor = constructor; in kmem_cache_create()
3763 cp->cache_destructor = destructor; in kmem_cache_create()
3764 cp->cache_reclaim = reclaim; in kmem_cache_create()
3765 cp->cache_private = private; in kmem_cache_create()
3766 cp->cache_arena = vmp; in kmem_cache_create()
3767 cp->cache_cflags = cflags; in kmem_cache_create()
3776 cp->cache_bufctl = chunksize - KMEM_ALIGN; in kmem_cache_create()
3779 if (cp->cache_flags & KMF_BUFTAG) { in kmem_cache_create()
3780 cp->cache_bufctl = chunksize; in kmem_cache_create()
3781 cp->cache_buftag = chunksize; in kmem_cache_create()
3782 if (cp->cache_flags & KMF_LITE) in kmem_cache_create()
3788 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_create()
3789 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify); in kmem_cache_create()
3790 if (cp->cache_flags & KMF_LITE) in kmem_cache_create()
3791 cp->cache_verify = sizeof (uint64_t); in kmem_cache_create()
3794 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave); in kmem_cache_create()
3796 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); in kmem_cache_create()
3802 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); in kmem_cache_create()
3803 cp->cache_mincolor = cp->cache_slabsize - chunksize; in kmem_cache_create()
3804 cp->cache_maxcolor = cp->cache_mincolor; in kmem_cache_create()
3805 cp->cache_flags |= KMF_HASH; in kmem_cache_create()
3806 ASSERT(!(cp->cache_flags & KMF_BUFTAG)); in kmem_cache_create()
3808 !(cp->cache_flags & KMF_AUDIT) && in kmem_cache_create()
3810 cp->cache_slabsize = vmp->vm_quantum; in kmem_cache_create()
3811 cp->cache_mincolor = 0; in kmem_cache_create()
3812 cp->cache_maxcolor = in kmem_cache_create()
3813 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize; in kmem_cache_create()
3814 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize); in kmem_cache_create()
3815 ASSERT(!(cp->cache_flags & KMF_AUDIT)); in kmem_cache_create()
3832 cp->cache_slabsize = bestfit; in kmem_cache_create()
3833 cp->cache_mincolor = 0; in kmem_cache_create()
3834 cp->cache_maxcolor = bestfit % chunksize; in kmem_cache_create()
3835 cp->cache_flags |= KMF_HASH; in kmem_cache_create()
3838 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize); in kmem_cache_create()
3839 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1; in kmem_cache_create()
3848 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) || in kmem_cache_create()
3849 cp->cache_constructor != NULL) in kmem_cache_create()
3850 cp->cache_flags &= ~KMF_PREFILL; in kmem_cache_create()
3852 if (cp->cache_flags & KMF_HASH) { in kmem_cache_create()
3854 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ? in kmem_cache_create()
3858 if (cp->cache_maxcolor >= vmp->vm_quantum) in kmem_cache_create()
3859 cp->cache_maxcolor = vmp->vm_quantum - 1; in kmem_cache_create()
3861 cp->cache_color = cp->cache_mincolor; in kmem_cache_create()
3866 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL); in kmem_cache_create()
3868 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp, in kmem_cache_create()
3873 list_create(&cp->cache_complete_slabs, in kmem_cache_create()
3876 if (cp->cache_flags & KMF_HASH) { in kmem_cache_create()
3877 cp->cache_hash_table = vmem_alloc(kmem_hash_arena, in kmem_cache_create()
3879 bzero(cp->cache_hash_table, in kmem_cache_create()
3881 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1; in kmem_cache_create()
3882 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; in kmem_cache_create()
3888 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL); in kmem_cache_create()
3893 cp->cache_magtype = mtp; in kmem_cache_create()
3899 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_create()
3901 ccp->cc_flags = cp->cache_flags; in kmem_cache_create()
3909 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name, in kmem_cache_create()
3913 cp->cache_kstat->ks_data = &kmem_cache_kstat; in kmem_cache_create()
3914 cp->cache_kstat->ks_update = kmem_cache_kstat_update; in kmem_cache_create()
3915 cp->cache_kstat->ks_private = cp; in kmem_cache_create()
3916 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock; in kmem_cache_create()
3917 kstat_install(cp->cache_kstat); in kmem_cache_create()
3925 list_insert_tail(&kmem_caches, cp); in kmem_cache_create()
3929 kmem_cache_magazine_enable(cp); in kmem_cache_create()
3931 return (cp); in kmem_cache_create()
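
Before any of the bookkeeping above, kmem_cache_create() fixes the chunk geometry: the buffer size plus any debug trailers is rounded up to the alignment, and a small non-debug cache keeps its slab header on-slab in a one-quantum slab, with the leftover bytes becoming the color range (lines 3810-3814) and cache_maxchunks taken as slabsize / chunksize (line 3838). A sketch of that on-slab layout math, with an invented header struct and illustrative sizes:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define P2ROUNDUP(x, a) (((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/* Invented stand-in for the size of kmem_slab_t. */
struct slab_header { void *links[4]; size_t counts[3]; };

struct layout {
    size_t chunksize, slabsize, mincolor, maxcolor, maxchunks;
};

/* Geometry for a small cache that keeps its slab header on-slab,
 * as the non-hashed path at lines 3810-3814 does. */
static struct layout
small_cache_layout(size_t bufsize, size_t align, size_t quantum)
{
    struct layout l;

    l.chunksize = P2ROUNDUP(bufsize, align);
    l.slabsize  = quantum;              /* one vmem quantum per slab */
    l.mincolor  = 0;
    l.maxcolor  = (l.slabsize - sizeof (struct slab_header)) % l.chunksize;
    l.maxchunks = l.slabsize / l.chunksize;     /* line 3838's formula */
    return (l);
}

int
main(void)
{
    struct layout l = small_cache_layout(48, 8, 4096);

    printf("chunk=%zu slab=%zu maxcolor=%zu chunks/slab=%zu\n",
        l.chunksize, l.slabsize, l.maxcolor, l.maxchunks);
    return (0);
}
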
3974 kmem_cache_set_move(kmem_cache_t *cp, in kmem_cache_set_move() argument
3986 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH)); in kmem_cache_set_move()
3987 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER)); in kmem_cache_set_move()
3996 mutex_enter(&cp->cache_lock); in kmem_cache_set_move()
3998 if (KMEM_IS_MOVABLE(cp)) { in kmem_cache_set_move()
3999 if (cp->cache_move == NULL) { in kmem_cache_set_move()
4000 ASSERT(cp->cache_slab_alloc == 0); in kmem_cache_set_move()
4002 cp->cache_defrag = defrag; in kmem_cache_set_move()
4004 bzero(cp->cache_defrag, sizeof (kmem_defrag_t)); in kmem_cache_set_move()
4005 avl_create(&cp->cache_defrag->kmd_moves_pending, in kmem_cache_set_move()
4011 list_create(&cp->cache_defrag->kmd_deadlist, in kmem_cache_set_move()
4014 kmem_reset_reclaim_threshold(cp->cache_defrag); in kmem_cache_set_move()
4016 cp->cache_move = move; in kmem_cache_set_move()
4019 mutex_exit(&cp->cache_lock); in kmem_cache_set_move()
4027 kmem_cache_destroy(kmem_cache_t *cp) in kmem_cache_destroy() argument
4037 list_remove(&kmem_caches, cp); in kmem_cache_destroy()
4043 if (kmem_move_taskq != NULL && cp->cache_defrag != NULL) in kmem_cache_destroy()
4046 kmem_cache_magazine_purge(cp); in kmem_cache_destroy()
4048 mutex_enter(&cp->cache_lock); in kmem_cache_destroy()
4049 if (cp->cache_buftotal != 0) in kmem_cache_destroy()
4051 cp->cache_name, (void *)cp); in kmem_cache_destroy()
4052 if (cp->cache_defrag != NULL) { in kmem_cache_destroy()
4053 avl_destroy(&cp->cache_defrag->kmd_moves_pending); in kmem_cache_destroy()
4054 list_destroy(&cp->cache_defrag->kmd_deadlist); in kmem_cache_destroy()
4055 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag); in kmem_cache_destroy()
4056 cp->cache_defrag = NULL; in kmem_cache_destroy()
4064 cp->cache_constructor = (int (*)(void *, void *, int))1; in kmem_cache_destroy()
4065 cp->cache_destructor = (void (*)(void *, void *))2; in kmem_cache_destroy()
4066 cp->cache_reclaim = (void (*)(void *))3; in kmem_cache_destroy()
4067 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4; in kmem_cache_destroy()
4068 mutex_exit(&cp->cache_lock); in kmem_cache_destroy()
4070 kstat_delete(cp->cache_kstat); in kmem_cache_destroy()
4072 if (cp->cache_hash_table != NULL) in kmem_cache_destroy()
4073 vmem_free(kmem_hash_arena, cp->cache_hash_table, in kmem_cache_destroy()
4074 (cp->cache_hash_mask + 1) * sizeof (void *)); in kmem_cache_destroy()
4077 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); in kmem_cache_destroy()
4079 mutex_destroy(&cp->cache_depot_lock); in kmem_cache_destroy()
4080 mutex_destroy(&cp->cache_lock); in kmem_cache_destroy()
4082 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus)); in kmem_cache_destroy()
4111 kmem_cache_t *cp; in kmem_alloc_caches_create() local
4130 cp = kmem_cache_create(name, cache_size, align, in kmem_alloc_caches_create()
4134 alloc_table[(size - 1) >> shift] = cp; in kmem_alloc_caches_create()
4243 kmem_cache_t *cp; in kmem_init() local
4312 while ((cp = list_tail(&kmem_caches)) != NULL) in kmem_init()
4313 kmem_cache_destroy(cp); in kmem_init()
4511 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf) in kmem_slab_allocated() argument
4515 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_allocated()
4518 if (cp->cache_flags & KMF_HASH) { in kmem_slab_allocated()
4519 for (bcp = *KMEM_HASH(cp, buf); in kmem_slab_allocated()
4529 sp = KMEM_SLAB(cp, buf); in kmem_slab_allocated()
4531 bufbcp = KMEM_BUFCTL(cp, buf); in kmem_slab_allocated()
4541 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags) in kmem_slab_is_reclaimable() argument
4545 ASSERT(cp->cache_defrag != NULL); in kmem_slab_is_reclaimable()
4580 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer)); in kmem_slab_is_reclaimable()
4588 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) in kmem_slab_move_yes() argument
4590 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_move_yes()
4599 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_move_yes()
4602 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_move_yes()
4611 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) in kmem_slab_move_no() argument
4614 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_move_no()
4621 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_move_no()
4625 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_move_no()
4666 kmem_cache_t *cp = sp->slab_cache; in kmem_move_buffer() local
4670 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_buffer()
4679 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) { in kmem_move_buffer()
4680 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4681 kmem_move_end(cp, callback); in kmem_move_buffer()
4689 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4690 free_on_slab = (kmem_slab_allocated(cp, sp, in kmem_move_buffer()
4692 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4695 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4696 kmem_move_end(cp, callback); in kmem_move_buffer()
4700 if (cp->cache_flags & KMF_BUFTAG) { in kmem_move_buffer()
4704 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf, in kmem_move_buffer()
4706 kmem_move_end(cp, callback); in kmem_move_buffer()
4709 } else if (cp->cache_constructor != NULL && in kmem_move_buffer()
4710 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private, in kmem_move_buffer()
4712 atomic_inc_64(&cp->cache_alloc_fail); in kmem_move_buffer()
4713 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4714 kmem_move_end(cp, callback); in kmem_move_buffer()
4718 cp->cache_defrag->kmd_callbacks++; in kmem_move_buffer()
4719 cp->cache_defrag->kmd_thread = curthread; in kmem_move_buffer()
4720 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf; in kmem_move_buffer()
4721 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf; in kmem_move_buffer()
4722 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *, in kmem_move_buffer()
4725 response = cp->cache_move(callback->kmm_from_buf, in kmem_move_buffer()
4726 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private); in kmem_move_buffer()
4728 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *, in kmem_move_buffer()
4730 cp->cache_defrag->kmd_thread = NULL; in kmem_move_buffer()
4731 cp->cache_defrag->kmd_from_buf = NULL; in kmem_move_buffer()
4732 cp->cache_defrag->kmd_to_buf = NULL; in kmem_move_buffer()
4735 cp->cache_defrag->kmd_yes++; in kmem_move_buffer()
4736 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); in kmem_move_buffer()
4739 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4740 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4741 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4742 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4743 kmem_move_end(cp, callback); in kmem_move_buffer()
4749 cp->cache_defrag->kmd_no++; in kmem_move_buffer()
4750 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4751 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4752 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4755 cp->cache_defrag->kmd_later++; in kmem_move_buffer()
4756 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4758 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4763 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4768 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4771 cp->cache_defrag->kmd_dont_need++; in kmem_move_buffer()
4772 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); in kmem_move_buffer()
4774 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4775 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4776 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4777 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4794 cp->cache_defrag->kmd_dont_know++; in kmem_move_buffer()
4798 cp->cache_name, (void *)cp, response); in kmem_move_buffer()
4801 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE); in kmem_move_buffer()
4802 kmem_move_end(cp, callback); in kmem_move_buffer()
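
kmem_move_buffer() drives the defrag handshake: it allocates a destination buffer, invokes the client's cache_move() callback with (from, to, size, private), and then acts on the response, freeing the old buffer on YES or DONT_NEED, pessimizing the slab on NO, and leaving LATER/DONT_KNOW pending. A condensed sketch of the dispatch using the kmem_cbrc_t responses named in the listing:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

typedef enum {
    KMEM_CBRC_YES, KMEM_CBRC_NO, KMEM_CBRC_LATER,
    KMEM_CBRC_DONT_NEED, KMEM_CBRC_DONT_KNOW
} kmem_cbrc_t;

typedef kmem_cbrc_t (*move_cb_t)(void *from, void *to, size_t size,
    void *priv);

/* A well-behaved client: copy the object and report YES.  Real
 * clients return NO or LATER when the object is held or locked. */
static kmem_cbrc_t
client_move(void *from, void *to, size_t size, void *priv)
{
    (void) priv;
    memcpy(to, from, size);
    return (KMEM_CBRC_YES);
}

/* The allocator's side of the handshake, condensed: returns 1 when
 * the from buffer may be freed (YES or DONT_NEED). */
static int
move_buffer(void *from, void *to, size_t size, move_cb_t cb)
{
    switch (cb(from, to, size, NULL)) {
    case KMEM_CBRC_YES:
    case KMEM_CBRC_DONT_NEED:
        return (1);             /* kmem_slab_free_constructed(from) */
    case KMEM_CBRC_NO:          /* mark the slab no-move, free to */
    case KMEM_CBRC_LATER:       /* keep the move pending, free to */
    case KMEM_CBRC_DONT_KNOW:   /* buffer may not even be allocated */
    default:
        return (0);
    }
}

int
main(void)
{
    char from[16] = "object", to[16] = "";

    printf("freed from: %d, to=\"%s\"\n",
        move_buffer(from, to, sizeof (from), client_move), to);
    return (0);
}
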
4807 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags) in kmem_move_begin() argument
4815 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_begin()
4827 mutex_enter(&cp->cache_lock); in kmem_move_begin()
4829 n = avl_numnodes(&cp->cache_partial_slabs); in kmem_move_begin()
4831 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4836 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index); in kmem_move_begin()
4845 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4850 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs), in kmem_move_begin()
4853 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index); in kmem_move_begin()
4855 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4859 mutex_enter(&cp->cache_lock); in kmem_move_begin()
4860 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); in kmem_move_begin()
4861 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4862 kmem_slab_free(cp, to_buf); in kmem_move_begin()
4871 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback) in kmem_move_end() argument
4875 ASSERT(cp->cache_defrag != NULL); in kmem_move_end()
4877 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_end()
4879 mutex_enter(&cp->cache_lock); in kmem_move_end()
4880 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending, in kmem_move_end()
4882 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); in kmem_move_end()
4883 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) { in kmem_move_end()
4884 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_move_end()
4900 cp->cache_defrag->kmd_deadcount--; in kmem_move_end()
4901 cp->cache_slab_destroy++; in kmem_move_end()
4902 mutex_exit(&cp->cache_lock); in kmem_move_end()
4903 kmem_slab_destroy(cp, sp); in kmem_move_end()
4904 mutex_enter(&cp->cache_lock); in kmem_move_end()
4907 mutex_exit(&cp->cache_lock); in kmem_move_end()
4926 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs, in kmem_move_buffers() argument
4939 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_move_buffers()
4941 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL); in kmem_move_buffers()
4942 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) : in kmem_move_buffers()
4943 avl_numnodes(&cp->cache_partial_slabs) > 1); in kmem_move_buffers()
4966 sp = avl_last(&cp->cache_partial_slabs); in kmem_move_buffers()
4969 ((sp != avl_first(&cp->cache_partial_slabs)) || in kmem_move_buffers()
4971 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) { in kmem_move_buffers()
4973 if (!kmem_slab_is_reclaimable(cp, sp, flags)) { in kmem_move_buffers()
4981 buf = (((char *)buf) + cp->cache_chunksize), j++) { in kmem_move_buffers()
4983 if (kmem_slab_allocated(cp, sp, buf) == NULL) { in kmem_move_buffers()
5007 mutex_exit(&cp->cache_lock); in kmem_move_buffers()
5009 success = kmem_move_begin(cp, sp, buf, flags); in kmem_move_buffers()
5023 mutex_enter(&cp->cache_lock); in kmem_move_buffers()
5029 &cp->cache_defrag->kmd_deadlist; in kmem_move_buffers()
5033 &cp->cache_defrag->kmd_moves_pending)) { in kmem_move_buffers()
5058 cp->cache_defrag->kmd_deadcount--; in kmem_move_buffers()
5059 cp->cache_slab_destroy++; in kmem_move_buffers()
5060 mutex_exit(&cp->cache_lock); in kmem_move_buffers()
5061 kmem_slab_destroy(cp, sp); in kmem_move_buffers()
5062 mutex_enter(&cp->cache_lock); in kmem_move_buffers()
5110 ASSERT(!avl_is_empty(&cp->cache_partial_slabs)); in kmem_move_buffers()
5111 if (sp == avl_first(&cp->cache_partial_slabs)) { in kmem_move_buffers()
5134 kmem_cache_t *cp = args->kmna_cache; in kmem_cache_move_notify_task() local
5139 ASSERT(list_link_active(&cp->cache_link)); in kmem_cache_move_notify_task()
5142 mutex_enter(&cp->cache_lock); in kmem_cache_move_notify_task()
5143 sp = kmem_slab_allocated(cp, NULL, buf); in kmem_cache_move_notify_task()
5147 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5152 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { in kmem_cache_move_notify_task()
5160 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5164 kmem_slab_move_yes(cp, sp, buf); in kmem_cache_move_notify_task()
5167 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5169 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY); in kmem_cache_move_notify_task()
5170 mutex_enter(&cp->cache_lock); in kmem_cache_move_notify_task()
5174 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_cache_move_notify_task()
5178 &cp->cache_defrag->kmd_moves_pending)) { in kmem_cache_move_notify_task()
5180 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5184 cp->cache_defrag->kmd_deadcount--; in kmem_cache_move_notify_task()
5185 cp->cache_slab_destroy++; in kmem_cache_move_notify_task()
5186 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5187 kmem_slab_destroy(cp, sp); in kmem_cache_move_notify_task()
5191 kmem_slab_move_yes(cp, sp, buf); in kmem_cache_move_notify_task()
5193 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5197 kmem_cache_move_notify(kmem_cache_t *cp, void *buf) in kmem_cache_move_notify() argument
5203 args->kmna_cache = cp; in kmem_cache_move_notify()
5213 kmem_cache_defrag(kmem_cache_t *cp) in kmem_cache_defrag() argument
5217 ASSERT(cp->cache_defrag != NULL); in kmem_cache_defrag()
5219 mutex_enter(&cp->cache_lock); in kmem_cache_defrag()
5220 n = avl_numnodes(&cp->cache_partial_slabs); in kmem_cache_defrag()
5223 cp->cache_defrag->kmd_defrags++; in kmem_cache_defrag()
5224 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE); in kmem_cache_defrag()
5226 mutex_exit(&cp->cache_lock); in kmem_cache_defrag()
5231 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree) in kmem_cache_frag_threshold() argument
5239 (cp->cache_buftotal * kmem_frag_numer)); in kmem_cache_frag_threshold()
5243 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap) in kmem_cache_is_fragmented() argument
5248 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_cache_is_fragmented()
5252 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { in kmem_cache_is_fragmented()
5256 if ((cp->cache_complete_slab_count + avl_numnodes( in kmem_cache_is_fragmented()
5257 &cp->cache_partial_slabs)) < kmem_frag_minslabs) { in kmem_cache_is_fragmented()
5262 nfree = cp->cache_bufslab; in kmem_cache_is_fragmented()
5263 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) && in kmem_cache_is_fragmented()
5264 kmem_cache_frag_threshold(cp, nfree)); in kmem_cache_is_fragmented()
5275 mutex_enter(&cp->cache_depot_lock); in kmem_cache_is_fragmented()
5276 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_cache_is_fragmented()
5277 reap = MIN(reap, cp->cache_full.ml_total); in kmem_cache_is_fragmented()
5278 mutex_exit(&cp->cache_depot_lock); in kmem_cache_is_fragmented()
5280 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize); in kmem_cache_is_fragmented()
5281 if (kmem_cache_frag_threshold(cp, nfree)) { in kmem_cache_is_fragmented()
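
kmem_cache_is_fragmented() calls a cache fragmented only when it has more than one partial slab and its free-buffer count crosses kmem_cache_frag_threshold(), a numer/denom fraction of cache_buftotal; reapable full magazines are then added to nfree to ask whether reaping alone would cross the same line. A sketch of the ratio test (the 1/8 ratio is an assumed value for the kmem_frag_numer/denom tunables, which the listing does not show):

#include <stdio.h>
#include <stdint.h>

/* Illustrative tunables; the kernel's kmem_frag_numer/denom ratio
 * plays the same role (these particular values are an assumption). */
static uint64_t frag_numer = 1;
static uint64_t frag_denom = 8;

/* Fragmented once the free fraction exceeds numer/denom of the
 * cache's total buffers (the comparison built around line 5239). */
static int
frag_threshold(uint64_t buftotal, uint64_t nfree)
{
    return (nfree * frag_denom > buftotal * frag_numer);
}

int
main(void)
{
    printf("%d\n", frag_threshold(1000, 100));  /* 0: 10% free */
    printf("%d\n", frag_threshold(1000, 200));  /* 1: 20% free */
    return (0);
}
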
5291 kmem_cache_scan(kmem_cache_t *cp) in kmem_cache_scan() argument
5298 mutex_enter(&cp->cache_lock); in kmem_cache_scan()
5300 kmd = cp->cache_defrag; in kmem_cache_scan()
5303 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5304 kmem_cache_reap(cp); in kmem_cache_scan()
5308 if (kmem_cache_is_fragmented(cp, &reap)) { in kmem_cache_scan()
5322 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range, in kmem_cache_scan()
5347 kmem_reset_reclaim_threshold(cp->cache_defrag); in kmem_cache_scan()
5349 if (!avl_is_empty(&cp->cache_partial_slabs)) { in kmem_cache_scan()
5360 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5361 kmem_cache_reap(cp); in kmem_cache_scan()
5365 (void) kmem_move_buffers(cp, in kmem_cache_scan()
5372 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5375 kmem_depot_ws_reap(cp); in kmem_cache_scan()