Lines Matching defs: cp

142 * is always zero.  umem_cache_alloc uses cp->cache_cpu_mask to
155 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
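The two comment excerpts above (lines 142 and 155) describe reducing a cpu hint with a mask before indexing the per-cpu caches. A minimal sketch of that masking idea, using assumed names and layout rather than libumem's actual definitions:

/*
 * Illustrative only: pick a per-"cpu" cache slot by masking a cpu hint
 * with a (power of two - 1) mask, as the comments above describe for
 * umem_cpu_mask / cp->cache_cpu_mask.  The types, names, and the origin
 * of cpu_hint are assumptions.
 */
#define NCPU_SLOTS  4                       /* must be a power of two */

typedef struct cpu_cache {
    int cc_rounds;                          /* stand-in for per-cpu state */
} cpu_cache_t;

typedef struct cache {
    unsigned int cache_cpu_mask;            /* NCPU_SLOTS - 1 */
    cpu_cache_t  cache_cpu[NCPU_SLOTS];
} cache_t;

static cpu_cache_t *
cpu_cache_for(cache_t *cp, unsigned int cpu_hint)
{
    /* any hint value lands in range because the mask is (power of two) - 1 */
    return (&cp->cache_cpu[cpu_hint & cp->cache_cpu_mask]);
}

Because the mask only needs to bound the index, a stale or approximate cpu hint still selects a valid slot; it just costs some cache-line sharing.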
931 umem_cache_t *cp;
934 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
935 cp = cp->cache_next)
936 func(cp);
941 umem_add_update_unlocked(umem_cache_t *cp, int flags)
950 if (cp->cache_uflags & UMU_ACTIVE) {
951 cp->cache_uflags |= flags;
953 if (cp->cache_unext != NULL) {
954 ASSERT(cp->cache_uflags != 0);
955 cp->cache_uflags |= flags;
957 ASSERT(cp->cache_uflags == 0);
958 cp->cache_uflags = flags;
959 cp->cache_unext = cnext = &umem_null_cache;
960 cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
961 cnext->cache_uprev = cp;
962 cprev->cache_unext = cp;
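Lines 959-962 above (and the cache_next/cache_prev lines at 2931-2934 later in this listing) are the same pattern: insertion just before a sentinel node of a circular doubly-linked list. A generic sketch of that insertion, with hypothetical node and field names:

/*
 * Sentinel-based circular doubly-linked list insertion, as at lines
 * 959-962 and 2931-2934.  node_t and these field names are
 * illustrative, not libumem's.
 */
typedef struct node {
    struct node *next;
    struct node *prev;
} node_t;

/* An empty list is a sentinel pointing at itself. */
static void
list_init(node_t *sentinel)
{
    sentinel->next = sentinel->prev = sentinel;
}

/* Insert np at the tail, i.e. immediately before the sentinel. */
static void
list_insert_tail(node_t *sentinel, node_t *np)
{
    np->next = sentinel;            /* cp->cache_unext = cnext = &umem_null_cache */
    np->prev = sentinel->prev;      /* cp->cache_uprev = cprev = ...cache_uprev   */
    sentinel->prev->next = np;      /* cprev->cache_unext = cp                    */
    sentinel->prev = np;            /* cnext->cache_uprev = cp                    */
}

With a sentinel there are no NULL checks: the empty-list and non-empty cases use exactly the same four stores.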
968 umem_add_update(umem_cache_t *cp, int flags)
972 umem_add_update_unlocked(cp, flags);
985 umem_remove_updates(umem_cache_t *cp)
992 while (cp->cache_uflags & UMU_ACTIVE) {
995 ASSERT(cp->cache_unext == NULL);
997 cp->cache_uflags |= UMU_NOTIFY;
1014 if (cp->cache_unext != NULL) {
1015 cp->cache_uprev->cache_unext = cp->cache_unext;
1016 cp->cache_unext->cache_uprev = cp->cache_uprev;
1017 cp->cache_uprev = cp->cache_unext = NULL;
1018 cp->cache_uflags = 0;
1023 ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
1030 umem_cache_t *cp;
1040 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
1041 cp = cp->cache_next)
1042 umem_add_update_unlocked(cp, flags);
1055 umem_findslab(umem_cache_t *cp, void *buf)
1059 (void) mutex_lock(&cp->cache_lock);
1060 for (sp = cp->cache_nullslab.slab_next;
1061 sp != &cp->cache_nullslab; sp = sp->slab_next) {
1063 (void) mutex_unlock(&cp->cache_lock);
1067 (void) mutex_unlock(&cp->cache_lock);
1077 umem_cache_t *cp = cparg;
1088 sp = umem_findslab(cp, buf);
1090 for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
1091 cp = cp->cache_prev) {
1092 if ((sp = umem_findslab(cp, buf)) != NULL)
1098 cp = NULL;
1101 if (cp != cparg)
1105 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1108 if (cp->cache_flags & UMF_BUFTAG)
1109 btp = UMEM_BUFTAG(cp, buf);
1110 if (cp->cache_flags & UMF_HASH) {
1111 (void) mutex_lock(&cp->cache_lock);
1112 for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1115 (void) mutex_unlock(&cp->cache_lock);
1118 if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
1131 umem_abort_info.ump_realcache = cp;
1141 off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1175 umem_printf("buffer was allocated from %s,\n", cp->cache_name);
1195 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
1209 (void *)sp, cp->cache_name);
1228 umem_alloc_retry(umem_cache_t *cp, int umflag)
1230 if (cp == &umem_null_cache) {
1367 #define UMEM_AUDIT(lp, cp, bcp) \
1373 (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \
1379 umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
1388 bcp->bc_cache = cp;
1389 UMEM_AUDIT(lp, cp, bcp);
1393 * Create a new slab for cache cp.
1396 umem_slab_create(umem_cache_t *cp, int umflag)
1398 size_t slabsize = cp->cache_slabsize;
1399 size_t chunksize = cp->cache_chunksize;
1400 int cache_flags = cp->cache_flags;
1405 vmem_t *vmp = cp->cache_arena;
1407 color = cp->cache_color + cp->cache_align;
1408 if (color > cp->cache_maxcolor)
1409 color = cp->cache_mincolor;
1410 cp->cache_color = color;
1419 if (!(cp->cache_cflags & UMC_NOTOUCH) &&
1420 (cp->cache_flags & UMF_DEADBEEF))
1428 sp = UMEM_SLAB(cp, slab);
1432 sp->slab_cache = cp;
1441 bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
1448 bcap->bc_cache = cp;
1453 bcp = UMEM_BUFCTL(cp, buf);
1456 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1462 cp->cache_verify);
1470 umem_log_event(umem_slab_log, cp, sp, slab);
1478 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1488 umem_log_event(umem_failure_log, cp, NULL, NULL);
1489 atomic_add_64(&cp->cache_alloc_fail, 1);
1498 umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
1500 vmem_t *vmp = cp->cache_arena;
1503 if (cp->cache_flags & UMF_HASH) {
1507 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1511 vmem_free(vmp, slab, cp->cache_slabsize);
1515 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1518 umem_slab_alloc(umem_cache_t *cp, int umflag)
1524 (void) mutex_lock(&cp->cache_lock);
1525 cp->cache_slab_alloc++;
1526 sp = cp->cache_freelist;
1527 ASSERT(sp->slab_cache == cp);
1532 (void) mutex_unlock(&cp->cache_lock);
1533 if (cp == &umem_null_cache)
1535 if ((sp = umem_slab_create(cp, umflag)) == NULL)
1537 (void) mutex_lock(&cp->cache_lock);
1538 cp->cache_slab_create++;
1539 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1540 cp->cache_bufmax = cp->cache_buftotal;
1541 sp->slab_next = cp->cache_freelist;
1542 sp->slab_prev = cp->cache_freelist->slab_prev;
1545 cp->cache_freelist = sp;
1557 cp->cache_freelist = sp->slab_next;
1561 if (cp->cache_flags & UMF_HASH) {
1566 hash_bucket = UMEM_HASH(cp, buf);
1569 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1570 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1573 buf = UMEM_BUF(cp, bcp);
1578 (void) mutex_unlock(&cp->cache_lock);
1584 * Free a raw (unconstructed) buffer to cp's slab layer.
1587 umem_slab_free(umem_cache_t *cp, void *buf)
1594 (void) mutex_lock(&cp->cache_lock);
1595 cp->cache_slab_free++;
1597 if (cp->cache_flags & UMF_HASH) {
1601 prev_bcpp = UMEM_HASH(cp, buf);
1608 cp->cache_lookup_depth++;
1612 bcp = UMEM_BUFCTL(cp, buf);
1613 sp = UMEM_SLAB(cp, buf);
1616 if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
1617 (void) mutex_unlock(&cp->cache_lock);
1618 umem_error(UMERR_BADADDR, cp, buf);
1622 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1623 if (cp->cache_flags & UMF_CONTENTS)
1626 cp->cache_contents);
1627 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1635 ASSERT(cp->cache_freelist != sp);
1638 sp->slab_next = cp->cache_freelist;
1639 sp->slab_prev = cp->cache_freelist->slab_prev;
1642 cp->cache_freelist = sp;
1656 if (sp == cp->cache_freelist)
1657 cp->cache_freelist = sp->slab_next;
1658 cp->cache_slab_destroy++;
1659 cp->cache_buftotal -= sp->slab_chunks;
1660 (void) mutex_unlock(&cp->cache_lock);
1661 umem_slab_destroy(cp, sp);
1664 (void) mutex_unlock(&cp->cache_lock);
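Lines 1597-1613 sketch the free path's lookup: hashed caches walk a bufctl chain (counting cache_lookup_depth) to find and unlink the entry for the buffer, while non-hashed caches compute the bufctl and slab directly from the address. A self-contained sketch of the pointer-to-pointer chain unlink, with stand-in types rather than libumem's:

#include <stddef.h>

/*
 * Pointer-to-pointer hash-chain unlink, in the spirit of lines
 * 1597-1608.  bufctl_t and the depth counter are illustrative
 * stand-ins, not libumem's types.
 */
typedef struct bufctl {
    struct bufctl *bc_next;
    void          *bc_addr;
} bufctl_t;

/*
 * Find and unlink the bufctl for buf; returns it, or NULL if the
 * address is not in this chain (a bad-free indicator).
 */
static bufctl_t *
hash_remove(bufctl_t **bucket, void *buf, unsigned long *lookup_depth)
{
    bufctl_t **prev_bcpp = bucket;
    bufctl_t *bcp;

    while ((bcp = *prev_bcpp) != NULL) {
        if (bcp->bc_addr == buf) {
            *prev_bcpp = bcp->bc_next;      /* unlink without a prev pointer */
            return (bcp);
        }
        (*lookup_depth)++;                  /* cf. cache_lookup_depth */
        prev_bcpp = &bcp->bc_next;
    }
    return (NULL);
}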
1668 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
1670 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1676 umem_error(UMERR_BADBUFTAG, cp, buf);
1682 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1683 umem_error(UMERR_BADBUFCTL, cp, buf);
1689 if (cp->cache_flags & UMF_DEADBEEF) {
1691 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
1692 umem_error(UMERR_MODIFIED, cp, buf);
1697 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
1700 umem_log_event(umem_failure_log, cp, NULL, NULL);
1710 if (mtbf || (cp->cache_constructor != NULL &&
1711 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
1712 atomic_add_64(&cp->cache_alloc_fail, 1);
1714 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1715 umem_slab_free(cp, buf);
1719 if (cp->cache_flags & UMF_AUDIT) {
1720 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1727 umem_cache_free_debug(umem_cache_t *cp, void *buf)
1729 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1735 umem_error(UMERR_DUPFREE, cp, buf);
1738 sp = umem_findslab(cp, buf);
1739 if (sp == NULL || sp->slab_cache != cp)
1740 umem_error(UMERR_BADADDR, cp, buf);
1742 umem_error(UMERR_REDZONE, cp, buf);
1748 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1749 umem_error(UMERR_BADBUFCTL, cp, buf);
1754 umem_error(UMERR_REDZONE, cp, buf);
1758 if (cp->cache_flags & UMF_AUDIT) {
1759 if (cp->cache_flags & UMF_CONTENTS)
1761 buf, cp->cache_contents);
1762 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1765 if (cp->cache_destructor != NULL)
1766 cp->cache_destructor(buf, cp->cache_private);
1768 if (cp->cache_flags & UMF_DEADBEEF)
1769 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1775 * Free each object in magazine mp to cp's slab layer, and free mp itself.
1778 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
1782 ASSERT(cp->cache_next == NULL || IN_UPDATE());
1787 if ((cp->cache_flags & UMF_DEADBEEF) &&
1789 cp->cache_verify) != NULL) {
1790 umem_error(UMERR_MODIFIED, cp, buf);
1794 if (!(cp->cache_flags & UMF_BUFTAG) &&
1795 cp->cache_destructor != NULL)
1796 cp->cache_destructor(buf, cp->cache_private);
1798 umem_slab_free(cp, buf);
1800 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1801 _umem_cache_free(cp->cache_magtype->mt_cache, mp);
1808 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
1818 if (mutex_trylock(&cp->cache_depot_lock) != 0) {
1819 (void) mutex_lock(&cp->cache_depot_lock);
1820 cp->cache_depot_contention++;
1824 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1831 (void) mutex_unlock(&cp->cache_depot_lock);
1840 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
1842 (void) mutex_lock(&cp->cache_depot_lock);
1843 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1847 (void) mutex_unlock(&cp->cache_depot_lock);
1851 * Update the working set statistics for cp's depot.
1854 umem_depot_ws_update(umem_cache_t *cp)
1856 (void) mutex_lock(&cp->cache_depot_lock);
1857 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
1858 cp->cache_full.ml_min = cp->cache_full.ml_total;
1859 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
1860 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
1861 (void) mutex_unlock(&cp->cache_depot_lock);
1868 umem_depot_ws_reap(umem_cache_t *cp)
1873 ASSERT(cp->cache_next == NULL || IN_REAP());
1875 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
1876 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
1877 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
1879 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
1880 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
1881 umem_magazine_destroy(cp, mp, 0);
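Lines 1857-1860 and 1875-1881 show the depot's working-set heuristic: each update interval records the minimum magazine-list length seen, and the reap pass frees at most that many magazines, since they sat idle for an entire interval. A hedged sketch of the bookkeeping, with an assumed maglist type and the magazine teardown left as a placeholder:

#include <stddef.h>

#define MIN(a, b)   ((a) < (b) ? (a) : (b))

/* Stand-in for umem_maglist_t; field names follow the listing above. */
typedef struct maglist {
    size_t ml_total;        /* magazines currently on the list         */
    size_t ml_min;          /* smallest ml_total since the last update */
    size_t ml_reaplimit;    /* the previous interval's minimum         */
} maglist_t;

/* Periodic update: last interval's minimum becomes this interval's reap limit. */
static void
ws_update(maglist_t *mlp)
{
    mlp->ml_reaplimit = mlp->ml_min;
    mlp->ml_min = mlp->ml_total;
}

/* Reap only magazines that went unused for a full interval. */
static void
ws_reap(maglist_t *mlp)
{
    size_t reap = MIN(mlp->ml_reaplimit, mlp->ml_min);

    while (reap > 0 && mlp->ml_total > 0) {
        /* ... take one magazine off the list and destroy it ... */
        reap--;
        mlp->ml_total--;
        if (mlp->ml_min > mlp->ml_total)
            mlp->ml_min = mlp->ml_total;
    }
}

Calling ws_update twice back to back (as the purge path at lines 2349-2350 does) collapses the working set to the current total, so a following reap can empty the depot.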
1898 * Allocate a constructed object from cache cp.
1902 _umem_cache_alloc(umem_cache_t *cp, int umflag)
1910 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
1922 umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1923 if (umem_alloc_retry(cp, umflag)) {
1950 fmp = umem_depot_alloc(cp, &cp->cache_full);
1953 umem_depot_free(cp, &cp->cache_empty,
1971 buf = umem_slab_alloc(cp, umflag);
1974 if (cp == &umem_null_cache)
1976 if (umem_alloc_retry(cp, umflag)) {
1983 if (cp->cache_flags & UMF_BUFTAG) {
1987 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1988 if (umem_alloc_retry(cp, umflag)) {
2001 if (cp->cache_constructor != NULL &&
2002 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
2003 atomic_add_64(&cp->cache_alloc_fail, 1);
2004 umem_slab_free(cp, buf);
2006 if (umem_alloc_retry(cp, umflag)) {
2016 * Free a constructed object to cache cp.
2020 _umem_cache_free(umem_cache_t *cp, void *buf)
2022 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
2027 if (umem_cache_free_debug(cp, buf) == -1)
2061 emp = umem_depot_alloc(cp, &cp->cache_empty);
2064 umem_depot_free(cp, &cp->cache_full,
2076 mtp = cp->cache_magtype;
2099 umem_depot_free(cp, &cp->cache_empty, emp);
2117 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
2118 cp->cache_destructor(buf, cp->cache_private);
2120 umem_slab_free(cp, buf);
2132 umem_cache_t *cp = umem_alloc_table[index];
2133 buf = _umem_cache_alloc(cp, umflag);
2135 if (cp->cache_flags & UMF_BUFTAG) {
2136 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2141 } else if (umem_alloc_retry(cp, umflag))
2159 umem_cache_t *cp = umem_alloc_table[index];
2160 buf = _umem_cache_alloc(cp, umflag);
2161 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
2162 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2166 if (buf == NULL && umem_alloc_retry(cp, umflag))
2224 umem_cache_t *cp = umem_alloc_table[index];
2225 if (cp->cache_flags & UMF_BUFTAG) {
2226 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
2230 umem_error(UMERR_DUPFREE, cp, buf);
2235 umem_error(UMERR_BADSIZE, cp, buf);
2237 umem_error(UMERR_REDZONE, cp, buf);
2242 umem_error(UMERR_REDZONE, cp, buf);
2247 _umem_cache_free(cp, buf);
2294 umem_cache_reap(umem_cache_t *cp)
2303 if (cp->cache_reclaim != NULL)
2304 cp->cache_reclaim(cp->cache_private);
2306 umem_depot_ws_reap(cp);
2315 umem_cache_magazine_purge(umem_cache_t *cp)
2321 ASSERT(cp->cache_next == NULL || IN_UPDATE());
2324 ccp = &cp->cache_cpu[cpu_seqid];
2339 umem_magazine_destroy(cp, mp, rounds);
2341 umem_magazine_destroy(cp, pmp, prounds);
2349 umem_depot_ws_update(cp);
2350 umem_depot_ws_update(cp);
2352 umem_depot_ws_reap(cp);
2359 umem_cache_magazine_enable(umem_cache_t *cp)
2363 if (cp->cache_flags & UMF_NOMAGAZINE)
2367 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2369 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2388 umem_cache_magazine_resize(umem_cache_t *cp)
2390 umem_magtype_t *mtp = cp->cache_magtype;
2394 if (cp->cache_chunksize < mtp->mt_maxbuf) {
2395 umem_cache_magazine_purge(cp);
2396 (void) mutex_lock(&cp->cache_depot_lock);
2397 cp->cache_magtype = ++mtp;
2398 cp->cache_depot_contention_prev =
2399 cp->cache_depot_contention + INT_MAX;
2400 (void) mutex_unlock(&cp->cache_depot_lock);
2401 umem_cache_magazine_enable(cp);
2410 umem_hash_rescale(umem_cache_t *cp)
2418 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
2419 old_size = cp->cache_hash_mask + 1;
2430 (void) mutex_lock(&cp->cache_lock);
2432 old_size = cp->cache_hash_mask + 1;
2433 old_table = cp->cache_hash_table;
2435 cp->cache_hash_mask = new_size - 1;
2436 cp->cache_hash_table = new_table;
2437 cp->cache_rescale++;
2444 umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
2451 (void) mutex_unlock(&cp->cache_lock);
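Line 2418 sizes the rebuilt table as the largest power of two not exceeding (3 * cache_buftotal + 4) / 2, which keeps the expected hash-chain length near one. A short worked example of that computation; highbit() below mimics the illumos helper (1-based index of the highest set bit), and the UMEM_HASH_INITIAL value is an assumption:

#include <stdio.h>
#include <stddef.h>

#define UMEM_HASH_INITIAL   64      /* assumed for illustration */
#define MAX(a, b)           ((a) > (b) ? (a) : (b))

/* 1-based index of the highest set bit; 0 for 0. */
static int
highbit(unsigned long v)
{
    int bit = 0;

    while (v != 0) {
        bit++;
        v >>= 1;
    }
    return (bit);
}

int
main(void)
{
    size_t buftotal = 3000;         /* example buffer count */
    size_t new_size = MAX(UMEM_HASH_INITIAL,
        (size_t)1 << (highbit(3 * buftotal + 4) - 2));

    /* 3*3000 + 4 = 9004, highbit = 14, 1 << 12 = 4096 buckets */
    printf("%zu buffers -> %zu buckets\n", buftotal, new_size);
    return (0);
}

With 3000 buffers the table grows to 4096 buckets, so the rescale triggered by the cache_buftotal checks at lines 2473-2476 keeps roughly one buffer per bucket.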
2461 umem_cache_update(umem_cache_t *cp)
2471 (void) mutex_lock(&cp->cache_lock);
2473 if ((cp->cache_flags & UMF_HASH) &&
2474 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
2475 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
2476 cp->cache_hash_mask > UMEM_HASH_INITIAL)))
2479 (void) mutex_unlock(&cp->cache_lock);
2484 umem_depot_ws_update(cp);
2490 (void) mutex_lock(&cp->cache_depot_lock);
2492 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
2493 (int)(cp->cache_depot_contention -
2494 cp->cache_depot_contention_prev) > umem_depot_contention)
2497 cp->cache_depot_contention_prev = cp->cache_depot_contention;
2499 (void) mutex_unlock(&cp->cache_depot_lock);
2502 umem_add_update(cp, update_flags);
2517 umem_cache_t *cp = umem_null_cache.cache_unext;
2519 cp->cache_uprev->cache_unext = cp->cache_unext;
2520 cp->cache_unext->cache_uprev = cp->cache_uprev;
2521 cp->cache_uprev = cp->cache_unext = NULL;
2523 ASSERT(!(cp->cache_uflags & UMU_ACTIVE));
2525 while (cp->cache_uflags) {
2526 int uflags = (cp->cache_uflags |= UMU_ACTIVE);
2535 umem_hash_rescale(cp);
2538 umem_cache_magazine_resize(cp);
2541 umem_cache_reap(cp);
2548 if (cp->cache_uflags & UMU_NOTIFY) {
2552 cp->cache_uflags &= ~uflags;
2653 umem_cache_t *cp, *cnext, *cprev;
2707 * Get a umem_cache structure. We arrange that cp->cache_cpu[]
2711 cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
2714 if (cp == NULL) {
2719 bzero(cp, csize);
2725 cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
2731 if (cp->cache_flags & UMF_LITE) {
2735 cp->cache_flags |= UMF_BUFTAG;
2736 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2738 cp->cache_flags &= ~UMF_DEBUG;
2742 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
2743 cp->cache_flags |= UMF_NOMAGAZINE;
2746 cp->cache_flags &= ~UMF_DEBUG;
2749 cp->cache_flags &= ~UMF_TOUCH;
2752 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2755 cp->cache_flags |= UMF_NOMAGAZINE;
2757 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
2758 cp->cache_flags |= UMF_REDZONE;
2760 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
2761 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
2762 cp->cache_flags |= UMF_FIREWALL;
2765 cp->cache_flags &= ~UMF_FIREWALL;
2767 if (cp->cache_flags & UMF_FIREWALL) {
2768 cp->cache_flags &= ~UMF_BUFTAG;
2769 cp->cache_flags |= UMF_NOMAGAZINE;
2777 (void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
2778 cp->cache_bufsize = bufsize;
2779 cp->cache_align = align;
2780 cp->cache_constructor = constructor;
2781 cp->cache_destructor = destructor;
2782 cp->cache_reclaim = reclaim;
2783 cp->cache_private = private;
2784 cp->cache_arena = vmp;
2785 cp->cache_cflags = cflags;
2786 cp->cache_cpu_mask = umem_cpu_mask;
2795 cp->cache_bufctl = chunksize - UMEM_ALIGN;
2798 if (cp->cache_flags & UMF_BUFTAG) {
2799 cp->cache_bufctl = chunksize;
2800 cp->cache_buftag = chunksize;
2804 if (cp->cache_flags & UMF_DEADBEEF) {
2805 cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
2806 if (cp->cache_flags & UMF_LITE)
2807 cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
2810 cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);
2812 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
2823 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
2824 cp->cache_mincolor = cp->cache_slabsize - chunksize;
2825 cp->cache_maxcolor = cp->cache_mincolor;
2826 cp->cache_flags |= UMF_HASH;
2827 ASSERT(!(cp->cache_flags & UMF_BUFTAG));
2829 !(cp->cache_flags & UMF_AUDIT) &&
2831 cp->cache_slabsize = vmp->vm_quantum;
2832 cp->cache_mincolor = 0;
2833 cp->cache_maxcolor =
2834 (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;
2836 if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
2840 ASSERT(!(cp->cache_flags & UMF_AUDIT));
2864 cp->cache_slabsize = bestfit;
2865 cp->cache_mincolor = 0;
2866 cp->cache_maxcolor = bestfit % chunksize;
2867 cp->cache_flags |= UMF_HASH;
2870 if (cp->cache_flags & UMF_HASH) {
2872 cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
2876 if (cp->cache_maxcolor >= vmp->vm_quantum)
2877 cp->cache_maxcolor = vmp->vm_quantum - 1;
2879 cp->cache_color = cp->cache_mincolor;
2884 (void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);
2886 cp->cache_freelist = &cp->cache_nullslab;
2887 cp->cache_nullslab.slab_cache = cp;
2888 cp->cache_nullslab.slab_refcnt = -1;
2889 cp->cache_nullslab.slab_next = &cp->cache_nullslab;
2890 cp->cache_nullslab.slab_prev = &cp->cache_nullslab;
2892 if (cp->cache_flags & UMF_HASH) {
2893 cp->cache_hash_table = vmem_alloc(umem_hash_arena,
2895 if (cp->cache_hash_table == NULL) {
2899 bzero(cp->cache_hash_table,
2901 cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
2902 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
2908 (void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);
2913 cp->cache_magtype = mtp;
2919 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2921 ccp->cc_flags = cp->cache_flags;
2931 cp->cache_next = cnext = &umem_null_cache;
2932 cp->cache_prev = cprev = umem_null_cache.cache_prev;
2933 cnext->cache_prev = cp;
2934 cprev->cache_next = cp;
2938 umem_cache_magazine_enable(cp);
2940 return (cp);
2943 (void) mutex_destroy(&cp->cache_lock);
2945 vmem_xfree(umem_cache_arena, cp, csize);
2950 umem_cache_destroy(umem_cache_t *cp)
2960 cp->cache_prev->cache_next = cp->cache_next;
2961 cp->cache_next->cache_prev = cp->cache_prev;
2962 cp->cache_prev = cp->cache_next = NULL;
2965 umem_remove_updates(cp);
2967 umem_cache_magazine_purge(cp);
2969 (void) mutex_lock(&cp->cache_lock);
2970 if (cp->cache_buftotal != 0)
2972 cp->cache_name, (void *)cp);
2973 cp->cache_reclaim = NULL;
2979 cp->cache_constructor = (umem_constructor_t *)1;
2980 cp->cache_destructor = (umem_destructor_t *)2;
2981 (void) mutex_unlock(&cp->cache_lock);
2983 if (cp->cache_hash_table != NULL)
2984 vmem_free(umem_hash_arena, cp->cache_hash_table,
2985 (cp->cache_hash_mask + 1) * sizeof (void *));
2988 (void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
2990 (void) mutex_destroy(&cp->cache_depot_lock);
2991 (void) mutex_destroy(&cp->cache_lock);
2993 vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
3089 umem_cache_t *cp;
3092 cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
3093 _umem_cache_free(cp, buf);
3101 umem_cache_t *cp;
3203 cp = umem_cache_create(name, cache_size, align,
3205 if (cp == NULL)
3208 umem_alloc_caches[i] = cp;
3232 cp = umem_alloc_caches[i];
3235 umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
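Lines 3092 and 3232-3235 show how request sizes map to caches: every UMEM_ALIGN-byte size bucket indexes umem_alloc_table with (size - 1) >> UMEM_ALIGN_SHIFT, and initialization fills each bucket with the smallest cache whose bufsize covers it. A hedged sketch of that table, with assumed constants and a trimmed-down cache type:

#include <stddef.h>

#define UMEM_ALIGN          8
#define UMEM_ALIGN_SHIFT    3
#define MAXBUF              256     /* assumed small maximum for the sketch */

typedef struct cache {
    size_t cache_bufsize;
} cache_t;

static cache_t *alloc_table[MAXBUF >> UMEM_ALIGN_SHIFT];

/* Point every 8-byte size bucket from first_size up to cp->cache_bufsize at cp. */
static void
table_fill(cache_t *cp, size_t first_size)
{
    size_t size;

    for (size = first_size; size <= cp->cache_bufsize; size += UMEM_ALIGN)
        alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
}

/* Allocation/free side: which cache serves this request size? */
static cache_t *
table_lookup(size_t size)
{
    return (alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT]);
}

A request for, say, 13 bytes computes (13 - 1) >> 3 = 1, landing in the bucket for sizes 9-16, which the fill loop pointed at the 16-byte cache (assuming such a cache exists in the table).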