Lines Matching defs:cp (declarations and uses of the kmem_cache_t pointer cp in the kmem slab allocator)
1179 kmem_cache_t *cp;
1182 for (cp = list_head(&kmem_caches); cp != NULL;
1183 cp = list_next(&kmem_caches, cp))
1185 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1188 func(cp);
1195 kmem_cache_t *cp;
1198 for (cp = list_head(&kmem_caches); cp != NULL;
1199 cp = list_next(&kmem_caches, cp)) {
1200 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1203 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1206 func(cp);
1215 kmem_findslab(kmem_cache_t *cp, void *buf)
1219 mutex_enter(&cp->cache_lock);
1220 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1221 sp = list_next(&cp->cache_complete_slabs, sp)) {
1223 mutex_exit(&cp->cache_lock);
1227 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1228 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1230 mutex_exit(&cp->cache_lock);
1234 mutex_exit(&cp->cache_lock);
1244 kmem_cache_t *cp = cparg;
1253 sp = kmem_findslab(cp, buf);
1255 for (cp = list_tail(&kmem_caches); cp != NULL;
1256 cp = list_prev(&kmem_caches, cp)) {
1257 if ((sp = kmem_findslab(cp, buf)) != NULL)
1263 cp = NULL;
1266 if (cp != cparg)
1270 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1273 if (cp->cache_flags & KMF_BUFTAG)
1274 btp = KMEM_BUFTAG(cp, buf);
1275 if (cp->cache_flags & KMF_HASH) {
1276 mutex_enter(&cp->cache_lock);
1277 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1280 mutex_exit(&cp->cache_lock);
1283 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1296 kmem_panic_info.kmp_realcache = cp;
1306 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1340 printf("buffer was allocated from %s,\n", cp->cache_name);
1359 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1369 (void *)sp, cp->cache_name);
1459 #define KMEM_AUDIT(lp, cp, bcp) \
1469 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1477 bca.bc_cache = cp;
1478 KMEM_AUDIT(lp, cp, &bca);
1482 * Create a new slab for cache cp.
1485 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1487 size_t slabsize = cp->cache_slabsize;
1488 size_t chunksize = cp->cache_chunksize;
1489 int cache_flags = cp->cache_flags;
1494 vmem_t *vmp = cp->cache_arena;
1496 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1498 color = cp->cache_color + cp->cache_align;
1499 if (color > cp->cache_maxcolor)
1500 color = cp->cache_mincolor;
1501 cp->cache_color = color;
1516 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1517 if (!(cp->cache_cflags & KMC_NOTOUCH))
1525 sp = KMEM_SLAB(cp, slab);
1529 sp->slab_cache = cp;
1541 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1548 bcap->bc_cache = cp;
1553 bcp = KMEM_BUFCTL(cp, buf);
1556 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1562 cp->cache_verify);
1570 kmem_log_event(kmem_slab_log, cp, sp, slab);
1578 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1588 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1589 atomic_inc_64(&cp->cache_alloc_fail);
1598 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1600 vmem_t *vmp = cp->cache_arena;
1603 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1606 if (cp->cache_flags & KMF_HASH) {
1610 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1614 vmem_free(vmp, slab, cp->cache_slabsize);
1618 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1624 ASSERT(MUTEX_HELD(&cp->cache_lock));
1627 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1631 (sp == avl_first(&cp->cache_partial_slabs))));
1632 ASSERT(sp->slab_cache == cp);
1634 cp->cache_slab_alloc++;
1635 cp->cache_bufslab--;
1641 if (cp->cache_flags & KMF_HASH) {
1646 hash_bucket = KMEM_HASH(cp, buf);
1649 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1650 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1653 buf = KMEM_BUF(cp, bcp);
1664 avl_remove(&cp->cache_partial_slabs, sp);
1669 list_insert_head(&cp->cache_complete_slabs, sp);
1670 cp->cache_complete_slab_count++;
1680 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1681 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1682 kmem_slab_prefill(cp, sp);
1687 avl_add(&cp->cache_partial_slabs, sp);
1695 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1700 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1703 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1709 mutex_enter(&cp->cache_lock);
1710 test_destructor = (cp->cache_slab_alloc == 0);
1711 sp = avl_first(&cp->cache_partial_slabs);
1713 ASSERT(cp->cache_bufslab == 0);
1718 mutex_exit(&cp->cache_lock);
1719 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1722 mutex_enter(&cp->cache_lock);
1723 cp->cache_slab_create++;
1724 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1725 cp->cache_bufmax = cp->cache_buftotal;
1726 cp->cache_bufslab += sp->slab_chunks;
1729 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1730 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1731 (cp->cache_complete_slab_count +
1732 avl_numnodes(&cp->cache_partial_slabs) +
1733 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1734 mutex_exit(&cp->cache_lock);
1736 if (test_destructor && cp->cache_destructor != NULL) {
1742 if ((cp->cache_constructor == NULL) ||
1743 cp->cache_constructor(buf, cp->cache_private,
1745 cp->cache_destructor(buf, cp->cache_private);
1748 cp->cache_bufsize);
1749 if (cp->cache_flags & KMF_DEADBEEF) {
1750 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1760 * Free a raw (unconstructed) buffer to cp's slab layer.
1763 kmem_slab_free(kmem_cache_t *cp, void *buf)
1770 mutex_enter(&cp->cache_lock);
1771 cp->cache_slab_free++;
1773 if (cp->cache_flags & KMF_HASH) {
1777 prev_bcpp = KMEM_HASH(cp, buf);
1784 cp->cache_lookup_depth++;
1788 bcp = KMEM_BUFCTL(cp, buf);
1789 sp = KMEM_SLAB(cp, buf);
1792 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1793 mutex_exit(&cp->cache_lock);
1794 kmem_error(KMERR_BADADDR, cp, buf);
1806 kmem_slab_move_yes(cp, sp, buf);
1809 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1810 if (cp->cache_flags & KMF_CONTENTS)
1813 cp->cache_contents);
1814 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1820 cp->cache_bufslab++;
1829 list_remove(&cp->cache_complete_slabs, sp);
1830 cp->cache_complete_slab_count--;
1832 avl_remove(&cp->cache_partial_slabs, sp);
1835 cp->cache_buftotal -= sp->slab_chunks;
1836 cp->cache_bufslab -= sp->slab_chunks;
1848 if (cp->cache_defrag == NULL ||
1849 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1851 cp->cache_slab_destroy++;
1852 mutex_exit(&cp->cache_lock);
1853 kmem_slab_destroy(cp, sp);
1855 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1869 cp->cache_defrag->kmd_deadcount++;
1870 mutex_exit(&cp->cache_lock);
1879 list_remove(&cp->cache_complete_slabs, sp);
1880 cp->cache_complete_slab_count--;
1881 avl_add(&cp->cache_partial_slabs, sp);
1883 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1886 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1887 (cp->cache_complete_slab_count +
1888 avl_numnodes(&cp->cache_partial_slabs) +
1889 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1890 mutex_exit(&cp->cache_lock);
1897 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1900 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1905 kmem_error(KMERR_BADBUFTAG, cp, buf);
1911 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1912 kmem_error(KMERR_BADBUFCTL, cp, buf);
1916 if (cp->cache_flags & KMF_DEADBEEF) {
1917 if (!construct && (cp->cache_flags & KMF_LITE)) {
1919 kmem_error(KMERR_MODIFIED, cp, buf);
1922 if (cp->cache_constructor != NULL)
1930 cp->cache_verify)) {
1931 kmem_error(KMERR_MODIFIED, cp, buf);
1938 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1941 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1942 if (!construct && cp->cache_destructor != NULL)
1943 cp->cache_destructor(buf, cp->cache_private);
1948 if (mtbf || (construct && cp->cache_constructor != NULL &&
1949 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1950 atomic_inc_64(&cp->cache_alloc_fail);
1952 if (cp->cache_flags & KMF_DEADBEEF)
1953 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1954 kmem_slab_free(cp, buf);
1958 if (cp->cache_flags & KMF_AUDIT) {
1959 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1962 if ((cp->cache_flags & KMF_LITE) &&
1963 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1971 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1973 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1979 kmem_error(KMERR_DUPFREE, cp, buf);
1982 sp = kmem_findslab(cp, buf);
1983 if (sp == NULL || sp->slab_cache != cp)
1984 kmem_error(KMERR_BADADDR, cp, buf);
1986 kmem_error(KMERR_REDZONE, cp, buf);
1992 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1993 kmem_error(KMERR_BADBUFCTL, cp, buf);
1998 kmem_error(KMERR_REDZONE, cp, buf);
2002 if (cp->cache_flags & KMF_AUDIT) {
2003 if (cp->cache_flags & KMF_CONTENTS)
2005 buf, cp->cache_contents);
2006 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2009 if ((cp->cache_flags & KMF_LITE) &&
2010 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2014 if (cp->cache_flags & KMF_DEADBEEF) {
2015 if (cp->cache_flags & KMF_LITE)
2017 else if (cp->cache_destructor != NULL)
2018 cp->cache_destructor(buf, cp->cache_private);
2020 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2027 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2030 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2034 ASSERT(!list_link_active(&cp->cache_link) ||
2040 if (cp->cache_flags & KMF_DEADBEEF) {
2042 cp->cache_verify) != NULL) {
2043 kmem_error(KMERR_MODIFIED, cp, buf);
2046 if ((cp->cache_flags & KMF_LITE) &&
2047 cp->cache_destructor != NULL) {
2048 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2050 cp->cache_destructor(buf, cp->cache_private);
2053 } else if (cp->cache_destructor != NULL) {
2054 cp->cache_destructor(buf, cp->cache_private);
2057 kmem_slab_free(cp, buf);
2059 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2060 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2067 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2077 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2078 mutex_enter(&cp->cache_depot_lock);
2079 cp->cache_depot_contention++;
2083 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2090 mutex_exit(&cp->cache_depot_lock);
2099 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2101 mutex_enter(&cp->cache_depot_lock);
2102 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2106 mutex_exit(&cp->cache_depot_lock);
2110 * Update the working set statistics for cp's depot.
2113 kmem_depot_ws_update(kmem_cache_t *cp)
2115 mutex_enter(&cp->cache_depot_lock);
2116 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2117 cp->cache_full.ml_min = cp->cache_full.ml_total;
2118 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2119 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2120 mutex_exit(&cp->cache_depot_lock);
2124 * Set the working set statistics for cp's depot to zero. (Everything is
2128 kmem_depot_ws_zero(kmem_cache_t *cp)
2130 mutex_enter(&cp->cache_depot_lock);
2131 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2132 cp->cache_full.ml_min = cp->cache_full.ml_total;
2133 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2134 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2135 mutex_exit(&cp->cache_depot_lock);
2149 kmem_depot_ws_reap(kmem_cache_t *cp)
2155 ASSERT(!list_link_active(&cp->cache_link) ||
2158 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2160 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2161 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2162 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2169 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2171 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2172 kmem_magazine_destroy(cp, mp, 0);
2173 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2212 #define KMEM_DUMPCTL(cp, buf) \
2213 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2266 kmem_cache_t *cp;
2270 for (cp = list_head(&kmem_caches); cp != NULL;
2271 cp = list_next(&kmem_caches, cp)) {
2272 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2274 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2275 cp->cache_flags |= KMF_DUMPDIVERT;
2281 cp->cache_flags |= KMF_DUMPUNSAFE;
2330 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2337 if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2338 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2344 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2345 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2348 if (cp->cache_align < PAGESIZE) {
2359 cp->cache_dump.kd_alloc_fails++;
2370 if (cp->cache_constructor != NULL &&
2371 cp->cache_constructor(buf, cp->cache_private, kmflag)
2375 cp->cache_name, (void *)cp);
2381 cp->cache_dump.kd_alloc_fails++;
2393 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2398 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2399 cp->cache_dump.kd_freelist = buf;
2412 * Allocate a constructed object from cache cp.
2415 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2417 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2435 cp->cache_dump.kd_unsafe++;
2438 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2466 cp->cache_dump.kd_unsafe++;
2468 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2486 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2489 kmem_depot_free(cp, &cp->cache_empty,
2507 buf = kmem_slab_alloc(cp, kmflag);
2512 if (cp->cache_flags & KMF_BUFTAG) {
2516 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2527 return (kmem_cache_alloc(cp, kmflag));
2532 if (cp->cache_constructor != NULL &&
2533 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2534 atomic_inc_64(&cp->cache_alloc_fail);
2535 kmem_slab_free(cp, buf);
2549 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2551 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2552 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2559 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2560 cp->cache_destructor != NULL) {
2561 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2562 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2564 cp->cache_destructor(buf, cp->cache_private);
2567 cp->cache_destructor(buf, cp->cache_private);
2571 kmem_slab_free(cp, buf);
2582 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2593 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2596 kmem_depot_free(cp, &cp->cache_full,
2607 mtp = cp->cache_magtype;
2630 kmem_depot_free(cp, &cp->cache_empty, emp);
2642 * Free a constructed object to cache cp.
2645 kmem_cache_free(kmem_cache_t *cp, void *buf)
2647 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2653 ASSERT(cp->cache_defrag == NULL ||
2654 cp->cache_defrag->kmd_thread != curthread ||
2655 (buf != cp->cache_defrag->kmd_from_buf &&
2656 buf != cp->cache_defrag->kmd_to_buf));
2662 cp->cache_dump.kd_unsafe++;
2663 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2667 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2703 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2713 kmem_slab_free_constructed(cp, buf, B_TRUE);
2717 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2719 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2720 int cache_flags = cp->cache_flags;
2730 ASSERT(MUTEX_HELD(&cp->cache_lock));
2732 ASSERT(cp->cache_constructor == NULL);
2733 ASSERT(sp->slab_cache == cp);
2736 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2742 cp->cache_bufslab -= nbufs;
2743 cp->cache_slab_alloc += nbufs;
2744 list_insert_head(&cp->cache_complete_slabs, sp);
2745 cp->cache_complete_slab_count++;
2746 mutex_exit(&cp->cache_lock);
2750 void *buf = KMEM_BUF(cp, head);
2784 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2799 kmem_slab_free(cp, KMEM_BUF(cp, head));
2806 mutex_enter(&cp->cache_lock);
2816 kmem_cache_t *cp = kmem_alloc_table[index];
2817 buf = kmem_cache_alloc(cp, kmflag);
2819 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2820 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2824 if (cp->cache_flags & KMF_LITE) {
2843 kmem_cache_t *cp;
2847 cp = kmem_alloc_table[index];
2852 cp = kmem_big_alloc_table[index];
2873 buf = kmem_cache_alloc(cp, kmflag);
2874 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2875 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2879 if (cp->cache_flags & KMF_LITE) {
2890 kmem_cache_t *cp;
2893 cp = kmem_alloc_table[index];
2898 cp = kmem_big_alloc_table[index];
2909 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2910 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2914 kmem_error(KMERR_DUPFREE, cp, buf);
2919 kmem_error(KMERR_BADSIZE, cp, buf);
2921 kmem_error(KMERR_REDZONE, cp, buf);
2926 kmem_error(KMERR_REDZONE, cp, buf);
2930 if (cp->cache_flags & KMF_LITE) {
2935 kmem_cache_free(cp, buf);
3008 kmem_cache_reap(kmem_cache_t *cp)
3011 cp->cache_reap++;
3020 if (cp->cache_reclaim != NULL) {
3027 delta = cp->cache_full.ml_total;
3028 cp->cache_reclaim(cp->cache_private);
3029 delta = cp->cache_full.ml_total - delta;
3031 mutex_enter(&cp->cache_depot_lock);
3032 cp->cache_full.ml_reaplimit += delta;
3033 cp->cache_full.ml_min += delta;
3034 mutex_exit(&cp->cache_depot_lock);
3038 kmem_depot_ws_reap(cp);
3040 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3041 kmem_cache_defrag(cp);
3138 kmem_cache_magazine_purge(kmem_cache_t *cp)
3144 ASSERT(!list_link_active(&cp->cache_link) ||
3146 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3149 ccp = &cp->cache_cpu[cpu_seqid];
3164 kmem_magazine_destroy(cp, mp, rounds);
3166 kmem_magazine_destroy(cp, pmp, prounds);
3169 kmem_depot_ws_zero(cp);
3170 kmem_depot_ws_reap(cp);
3177 kmem_cache_magazine_enable(kmem_cache_t *cp)
3181 if (cp->cache_flags & KMF_NOMAGAZINE)
3185 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3187 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3197 kmem_cache_reap_now(kmem_cache_t *cp)
3199 ASSERT(list_link_active(&cp->cache_link));
3201 kmem_depot_ws_zero(cp);
3204 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3220 kmem_cache_magazine_resize(kmem_cache_t *cp)
3222 kmem_magtype_t *mtp = cp->cache_magtype;
3226 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3227 kmem_cache_magazine_purge(cp);
3228 mutex_enter(&cp->cache_depot_lock);
3229 cp->cache_magtype = ++mtp;
3230 cp->cache_depot_contention_prev =
3231 cp->cache_depot_contention + INT_MAX;
3232 mutex_exit(&cp->cache_depot_lock);
3233 kmem_cache_magazine_enable(cp);
3242 kmem_hash_rescale(kmem_cache_t *cp)
3250 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3251 old_size = cp->cache_hash_mask + 1;
3262 mutex_enter(&cp->cache_lock);
3264 old_size = cp->cache_hash_mask + 1;
3265 old_table = cp->cache_hash_table;
3267 cp->cache_hash_mask = new_size - 1;
3268 cp->cache_hash_table = new_table;
3269 cp->cache_rescale++;
3276 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3283 mutex_exit(&cp->cache_lock);
3293 kmem_cache_update(kmem_cache_t *cp)
3304 mutex_enter(&cp->cache_lock);
3306 if ((cp->cache_flags & KMF_HASH) &&
3307 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3308 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3309 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3312 mutex_exit(&cp->cache_lock);
3317 kmem_depot_ws_update(cp);
3323 mutex_enter(&cp->cache_depot_lock);
3325 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3326 (int)(cp->cache_depot_contention -
3327 cp->cache_depot_contention_prev) > kmem_depot_contention)
3330 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3332 mutex_exit(&cp->cache_depot_lock);
3336 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3340 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3342 if (cp->cache_defrag != NULL)
3344 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3373 kmem_cache_t *cp = ksp->ks_private;
3384 mutex_enter(&cp->cache_lock);
3386 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3387 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3388 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3389 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3390 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3393 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3410 mutex_enter(&cp->cache_depot_lock);
3412 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3413 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3414 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3415 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3416 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3418 (cp->cache_flags & KMF_NOMAGAZINE) ?
3419 0 : cp->cache_magtype->mt_magsize;
3421 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3422 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3423 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3425 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3426 reap = MIN(reap, cp->cache_full.ml_total);
3428 mutex_exit(&cp->cache_depot_lock);
3430 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3431 kmcp->kmc_align.value.ui64 = cp->cache_align;
3432 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3433 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3435 buf_avail += cp->cache_bufslab;
3437 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3438 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3439 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3440 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3441 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3442 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3443 cp->cache_hash_mask + 1 : 0;
3444 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3445 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3446 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3447 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3449 if (cp->cache_defrag == NULL) {
3464 kmem_defrag_t *kd = cp->cache_defrag;
3476 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3478 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3482 mutex_exit(&cp->cache_lock);
3492 kmem_cache_stat(kmem_cache_t *cp, char *name)
3495 kstat_t *ksp = cp->cache_kstat;
3582 const kmem_cache_t *cp;
3591 cp = s1->slab_cache;
3592 ASSERT(MUTEX_HELD(&cp->cache_lock));
3593 binshift = cp->cache_partial_binshift;
3598 w0 -= cp->cache_maxchunks;
3604 w1 -= cp->cache_maxchunks;
3640 kmem_cache_t *cp;
3668 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3672 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3674 bzero(cp, csize);
3675 list_link_init(&cp->cache_link);
3695 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3703 if (cp->cache_flags & KMF_LITE) {
3707 cp->cache_flags |= KMF_BUFTAG;
3708 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3710 cp->cache_flags &= ~KMF_DEBUG;
3714 if (cp->cache_flags & KMF_DEADBEEF)
3715 cp->cache_flags |= KMF_REDZONE;
3717 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3718 cp->cache_flags |= KMF_NOMAGAZINE;
3721 cp->cache_flags &= ~KMF_DEBUG;
3724 cp->cache_flags &= ~KMF_TOUCH;
3727 cp->cache_flags |= KMF_PREFILL;
3730 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3733 cp->cache_flags |= KMF_NOMAGAZINE;
3735 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3736 cp->cache_flags |= KMF_REDZONE;
3738 if (!(cp->cache_flags & KMF_AUDIT))
3739 cp->cache_flags &= ~KMF_CONTENTS;
3741 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3742 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3743 cp->cache_flags |= KMF_FIREWALL;
3746 cp->cache_flags &= ~KMF_FIREWALL;
3748 if (cp->cache_flags & KMF_FIREWALL) {
3749 cp->cache_flags &= ~KMF_BUFTAG;
3750 cp->cache_flags |= KMF_NOMAGAZINE;
3758 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3759 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3760 cp->cache_bufsize = bufsize;
3761 cp->cache_align = align;
3762 cp->cache_constructor = constructor;
3763 cp->cache_destructor = destructor;
3764 cp->cache_reclaim = reclaim;
3765 cp->cache_private = private;
3766 cp->cache_arena = vmp;
3767 cp->cache_cflags = cflags;
3776 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3779 if (cp->cache_flags & KMF_BUFTAG) {
3780 cp->cache_bufctl = chunksize;
3781 cp->cache_buftag = chunksize;
3782 if (cp->cache_flags & KMF_LITE)
3788 if (cp->cache_flags & KMF_DEADBEEF) {
3789 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3790 if (cp->cache_flags & KMF_LITE)
3791 cp->cache_verify = sizeof (uint64_t);
3794 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3796 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3802 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3803 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3804 cp->cache_maxcolor = cp->cache_mincolor;
3805 cp->cache_flags |= KMF_HASH;
3806 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3808 !(cp->cache_flags & KMF_AUDIT) &&
3810 cp->cache_slabsize = vmp->vm_quantum;
3811 cp->cache_mincolor = 0;
3812 cp->cache_maxcolor =
3813 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3814 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3815 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3832 cp->cache_slabsize = bestfit;
3833 cp->cache_mincolor = 0;
3834 cp->cache_maxcolor = bestfit % chunksize;
3835 cp->cache_flags |= KMF_HASH;
3838 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3839 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3848 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3849 cp->cache_constructor != NULL)
3850 cp->cache_flags &= ~KMF_PREFILL;
3852 if (cp->cache_flags & KMF_HASH) {
3854 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3858 if (cp->cache_maxcolor >= vmp->vm_quantum)
3859 cp->cache_maxcolor = vmp->vm_quantum - 1;
3861 cp->cache_color = cp->cache_mincolor;
3866 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3868 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3873 list_create(&cp->cache_complete_slabs,
3876 if (cp->cache_flags & KMF_HASH) {
3877 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3879 bzero(cp->cache_hash_table,
3881 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3882 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3888 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3893 cp->cache_magtype = mtp;
3899 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3901 ccp->cc_flags = cp->cache_flags;
3909 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3913 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3914 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3915 cp->cache_kstat->ks_private = cp;
3916 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3917 kstat_install(cp->cache_kstat);
3925 list_insert_tail(&kmem_caches, cp);
3929 kmem_cache_magazine_enable(cp);
3931 return (cp);
3974 kmem_cache_set_move(kmem_cache_t *cp,
3986 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
3987 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
3996 mutex_enter(&cp->cache_lock);
3998 if (KMEM_IS_MOVABLE(cp)) {
3999 if (cp->cache_move == NULL) {
4000 ASSERT(cp->cache_slab_alloc == 0);
4002 cp->cache_defrag = defrag;
4004 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4005 avl_create(&cp->cache_defrag->kmd_moves_pending,
4011 list_create(&cp->cache_defrag->kmd_deadlist,
4014 kmem_reset_reclaim_threshold(cp->cache_defrag);
4016 cp->cache_move = move;
4019 mutex_exit(&cp->cache_lock);
4027 kmem_cache_destroy(kmem_cache_t *cp)
4037 list_remove(&kmem_caches, cp);
4043 if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4046 kmem_cache_magazine_purge(cp);
4048 mutex_enter(&cp->cache_lock);
4049 if (cp->cache_buftotal != 0)
4051 cp->cache_name, (void *)cp);
4052 if (cp->cache_defrag != NULL) {
4053 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4054 list_destroy(&cp->cache_defrag->kmd_deadlist);
4055 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4056 cp->cache_defrag = NULL;
4064 cp->cache_constructor = (int (*)(void *, void *, int))1;
4065 cp->cache_destructor = (void (*)(void *, void *))2;
4066 cp->cache_reclaim = (void (*)(void *))3;
4067 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4068 mutex_exit(&cp->cache_lock);
4070 kstat_delete(cp->cache_kstat);
4072 if (cp->cache_hash_table != NULL)
4073 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4074 (cp->cache_hash_mask + 1) * sizeof (void *));
4077 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4079 mutex_destroy(&cp->cache_depot_lock);
4080 mutex_destroy(&cp->cache_lock);
4082 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4111 kmem_cache_t *cp;
4130 cp = kmem_cache_create(name, cache_size, align,
4134 alloc_table[(size - 1) >> shift] = cp;
4243 kmem_cache_t *cp;
4312 while ((cp = list_tail(&kmem_caches)) != NULL)
4313 kmem_cache_destroy(cp);
4511 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4515 ASSERT(MUTEX_HELD(&cp->cache_lock));
4518 if (cp->cache_flags & KMF_HASH) {
4519 for (bcp = *KMEM_HASH(cp, buf);
4529 sp = KMEM_SLAB(cp, buf);
4531 bufbcp = KMEM_BUFCTL(cp, buf);
4541 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4545 ASSERT(cp->cache_defrag != NULL);
4580 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4588 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4590 ASSERT(MUTEX_HELD(&cp->cache_lock));
4599 avl_remove(&cp->cache_partial_slabs, sp);
4602 avl_add(&cp->cache_partial_slabs, sp);
4611 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4614 ASSERT(MUTEX_HELD(&cp->cache_lock));
4621 avl_remove(&cp->cache_partial_slabs, sp);
4625 avl_add(&cp->cache_partial_slabs, sp);
4666 kmem_cache_t *cp = sp->slab_cache;
4670 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4679 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4680 kmem_slab_free(cp, callback->kmm_to_buf);
4681 kmem_move_end(cp, callback);
4689 mutex_enter(&cp->cache_lock);
4690 free_on_slab = (kmem_slab_allocated(cp, sp,
4692 mutex_exit(&cp->cache_lock);
4695 kmem_slab_free(cp, callback->kmm_to_buf);
4696 kmem_move_end(cp, callback);
4700 if (cp->cache_flags & KMF_BUFTAG) {
4704 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4706 kmem_move_end(cp, callback);
4709 } else if (cp->cache_constructor != NULL &&
4710 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4712 atomic_inc_64(&cp->cache_alloc_fail);
4713 kmem_slab_free(cp, callback->kmm_to_buf);
4714 kmem_move_end(cp, callback);
4718 cp->cache_defrag->kmd_callbacks++;
4719 cp->cache_defrag->kmd_thread = curthread;
4720 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4721 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4722 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4725 response = cp->cache_move(callback->kmm_from_buf,
4726 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4728 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4730 cp->cache_defrag->kmd_thread = NULL;
4731 cp->cache_defrag->kmd_from_buf = NULL;
4732 cp->cache_defrag->kmd_to_buf = NULL;
4735 cp->cache_defrag->kmd_yes++;
4736 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4739 cp->cache_defrag->kmd_slabs_freed++;
4740 mutex_enter(&cp->cache_lock);
4741 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4742 mutex_exit(&cp->cache_lock);
4743 kmem_move_end(cp, callback);
4749 cp->cache_defrag->kmd_no++;
4750 mutex_enter(&cp->cache_lock);
4751 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4752 mutex_exit(&cp->cache_lock);
4755 cp->cache_defrag->kmd_later++;
4756 mutex_enter(&cp->cache_lock);
4758 mutex_exit(&cp->cache_lock);
4763 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4768 mutex_exit(&cp->cache_lock);
4771 cp->cache_defrag->kmd_dont_need++;
4772 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4774 cp->cache_defrag->kmd_slabs_freed++;
4775 mutex_enter(&cp->cache_lock);
4776 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4777 mutex_exit(&cp->cache_lock);
4794 cp->cache_defrag->kmd_dont_know++;
4798 cp->cache_name, (void *)cp, response);
4801 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4802 kmem_move_end(cp, callback);
4807 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4815 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4827 mutex_enter(&cp->cache_lock);
4829 n = avl_numnodes(&cp->cache_partial_slabs);
4831 mutex_exit(&cp->cache_lock);
4836 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4845 mutex_exit(&cp->cache_lock);
4850 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4853 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4855 mutex_exit(&cp->cache_lock);
4859 mutex_enter(&cp->cache_lock);
4860 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4861 mutex_exit(&cp->cache_lock);
4862 kmem_slab_free(cp, to_buf);
4871 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4875 ASSERT(cp->cache_defrag != NULL);
4877 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4879 mutex_enter(&cp->cache_lock);
4880 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4882 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4883 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4884 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4900 cp->cache_defrag->kmd_deadcount--;
4901 cp->cache_slab_destroy++;
4902 mutex_exit(&cp->cache_lock);
4903 kmem_slab_destroy(cp, sp);
4904 mutex_enter(&cp->cache_lock);
4907 mutex_exit(&cp->cache_lock);
4926 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
4939 ASSERT(MUTEX_HELD(&cp->cache_lock));
4941 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
4942 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
4943 avl_numnodes(&cp->cache_partial_slabs) > 1);
4966 sp = avl_last(&cp->cache_partial_slabs);
4969 ((sp != avl_first(&cp->cache_partial_slabs)) ||
4971 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
4973 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
4981 buf = (((char *)buf) + cp->cache_chunksize), j++) {
4983 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5007 mutex_exit(&cp->cache_lock);
5009 success = kmem_move_begin(cp, sp, buf, flags);
5023 mutex_enter(&cp->cache_lock);
5029 &cp->cache_defrag->kmd_deadlist;
5033 &cp->cache_defrag->kmd_moves_pending)) {
5058 cp->cache_defrag->kmd_deadcount--;
5059 cp->cache_slab_destroy++;
5060 mutex_exit(&cp->cache_lock);
5061 kmem_slab_destroy(cp, sp);
5062 mutex_enter(&cp->cache_lock);
5110 ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5111 if (sp == avl_first(&cp->cache_partial_slabs)) {
5134 kmem_cache_t *cp = args->kmna_cache;
5139 ASSERT(list_link_active(&cp->cache_link));
5142 mutex_enter(&cp->cache_lock);
5143 sp = kmem_slab_allocated(cp, NULL, buf);
5147 mutex_exit(&cp->cache_lock);
5152 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5160 mutex_exit(&cp->cache_lock);
5164 kmem_slab_move_yes(cp, sp, buf);
5167 mutex_exit(&cp->cache_lock);
5169 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5170 mutex_enter(&cp->cache_lock);
5174 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5178 &cp->cache_defrag->kmd_moves_pending)) {
5180 mutex_exit(&cp->cache_lock);
5184 cp->cache_defrag->kmd_deadcount--;
5185 cp->cache_slab_destroy++;
5186 mutex_exit(&cp->cache_lock);
5187 kmem_slab_destroy(cp, sp);
5191 kmem_slab_move_yes(cp, sp, buf);
5193 mutex_exit(&cp->cache_lock);
5197 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5203 args->kmna_cache = cp;
5213 kmem_cache_defrag(kmem_cache_t *cp)
5217 ASSERT(cp->cache_defrag != NULL);
5219 mutex_enter(&cp->cache_lock);
5220 n = avl_numnodes(&cp->cache_partial_slabs);
5223 cp->cache_defrag->kmd_defrags++;
5224 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5226 mutex_exit(&cp->cache_lock);
5231 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5236 * cp->cache_buftotal kmem_frag_denom
5239 (cp->cache_buftotal * kmem_frag_numer));
5243 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5248 ASSERT(MUTEX_HELD(&cp->cache_lock));
5252 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5256 if ((cp->cache_complete_slab_count + avl_numnodes(
5257 &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5262 nfree = cp->cache_bufslab;
5263 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5264 kmem_cache_frag_threshold(cp, nfree));
5275 mutex_enter(&cp->cache_depot_lock);
5276 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5277 reap = MIN(reap, cp->cache_full.ml_total);
5278 mutex_exit(&cp->cache_depot_lock);
5280 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5281 if (kmem_cache_frag_threshold(cp, nfree)) {
5291 kmem_cache_scan(kmem_cache_t *cp)
5298 mutex_enter(&cp->cache_lock);
5300 kmd = cp->cache_defrag;
5303 mutex_exit(&cp->cache_lock);
5304 kmem_cache_reap(cp);
5308 if (kmem_cache_is_fragmented(cp, &reap)) {
5322 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5347 kmem_reset_reclaim_threshold(cp->cache_defrag);
5349 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5360 mutex_exit(&cp->cache_lock);
5361 kmem_cache_reap(cp);
5365 (void) kmem_move_buffers(cp,
5372 mutex_exit(&cp->cache_lock);
5375 kmem_depot_ws_reap(cp);
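
The lines above all belong to the allocator's internals (slab creation and destruction, magazine/depot management, debug verification, and defragmentation), each keyed off the kmem_cache_t pointer cp. For orientation, here is a minimal client-side sketch of how such a cache is typically created and used. The object type foo_t, the cache name "foo_cache", and the foo_* helpers are illustrative assumptions, not code from the listing; the kmem_cache_create/alloc/free/destroy calls and the constructor and destructor prototypes follow the signatures visible in the matched lines (for example, the function-pointer casts in kmem_cache_destroy).

    #include <sys/types.h>
    #include <sys/kmem.h>
    #include <sys/ksynch.h>

    /* Hypothetical object managed by the cache. */
    typedef struct foo {
            kmutex_t        foo_lock;
            uint64_t        foo_refcnt;
    } foo_t;

    /* Constructor: int (*)(void *, void *, int); runs once per raw buffer. */
    static int
    foo_construct(void *buf, void *private, int kmflag)
    {
            foo_t *fp = buf;

            mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
            fp->foo_refcnt = 0;
            return (0);
    }

    /* Destructor: void (*)(void *, void *); undoes the constructor. */
    static void
    foo_destruct(void *buf, void *private)
    {
            foo_t *fp = buf;

            mutex_destroy(&fp->foo_lock);
    }

    static kmem_cache_t *foo_cache;

    void
    foo_init(void)
    {
            /* No reclaim callback, default alignment and vmem arena, no cflags. */
            foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
                foo_construct, foo_destruct, NULL, NULL, NULL, 0);
    }

    void
    foo_fini(void)
    {
            kmem_cache_destroy(foo_cache);
    }

    foo_t *
    foo_hold(void)
    {
            /* KM_SLEEP: block for memory rather than fail. */
            return (kmem_cache_alloc(foo_cache, KM_SLEEP));
    }

    void
    foo_rele(foo_t *fp)
    {
            kmem_cache_free(foo_cache, fp);
    }

The point of the constructor/destructor pair is that object initialization (here, mutex_init) is paid only when a slab buffer is first constructed, not on every alloc/free; the magazine and depot layers in the listing exist to keep constructed objects cached per CPU so the common path never touches the slab layer at all.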