/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.
 * Each slab is broken down into one or more items that will satisfy an
 * individual allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes,
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However there is no strict enforcement as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
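 *
 * As a sketch of the allocation order above (the helper names here are
 * illustrative, not real UMA functions; the real fast path lives in
 * uma_zalloc_arg() in uma_core.c and also deals with critical sections,
 * constructors and failure handling):
 *
 *	item = percpu_cache_alloc(zone);		1) per-CPU cache
 *	if (item == NULL)
 *		item = domain_bucket_alloc(zone);	2) per-domain buckets
 *	if (item == NULL)
 *		item = keg_slab_alloc(zone);		3) slab from a keg
 *	if (item == NULL)
 *		item = backend_page_alloc(zone);	4) backend provider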
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */

/*
 * This is the representation for a normal (non-OFFPAGE) slab.
 *
 * i == item
 * s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *  ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 * |___________________________________________________________|
 *  ___________    ^
 * |slab header|   |
 * |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
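
/*
 * For example, with 4KB pages a 360-byte item fits 11 times with
 * 4096 - 11 * 360 = 136 bytes (about 3.3%) left over, comfortably under
 * UMA_MAX_WASTE; sizes that would waste more than 10% may instead have
 * their slab header allocated OFFPAGE when doing so fits an additional
 * item.  (Illustrative arithmetic that ignores the embedded header.)
 */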

/* Max size of a CACHESPREAD slab. */
#define	UMA_CACHESPREAD_MAX_SIZE	(128 * 1024)

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_OFFPAGE	0x00200000	/*
						 * Force the slab structure
						 * allocation off of the real
						 * memory.
						 */
#define	UMA_ZFLAG_HASH		0x00400000	/*
						 * Use a hash table instead of
						 * caching information in the
						 * vm_page.
						 */
#define	UMA_ZFLAG_VTOSLAB	0x00800000	/*
						 * Zone uses vtoslab for
						 * lookup.
						 */
#define	UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
#define	UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
#define	UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define	UMA_ZFLAG_RECLAIMING	0x08000000	/* Running zone_reclaim(). */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)

#define	PRINT_UMA_ZFLAGS	"\20"	\
    "\40CACHEONLY"			\
    "\37TRASH"				\
    "\36INTERNAL"			\
    "\35BUCKET"				\
    "\34RECLAIMING"			\
    "\33CACHE"				\
    "\32LIMIT"				\
    "\31CTORDTOR"			\
    "\30VTOSLAB"			\
    "\27HASH"				\
    "\26OFFPAGE"			\
    "\22ROUNDROBIN"			\
    "\21FIRSTTOUCH"			\
    "\20PCPU"				\
    "\17NODUMP"				\
    "\16CACHESPREAD"			\
    "\15MINBUCKET"			\
    "\14MAXBUCKET"			\
    "\13NOBUCKET"			\
    "\12SECONDARY"			\
    "\11NOTPAGE"			\
    "\10VM"				\
    "\7MTXCLASS"			\
    "\6NOFREE"				\
    "\5MALLOC"				\
    "\4NOTOUCH"				\
    "\2ZINIT"
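
/*
 * PRINT_UMA_ZFLAGS is a bitmask description string for the kernel printf(9)
 * %b conversion: "\20" selects hexadecimal output and each "\<bit>NAME"
 * entry names a (1-based) bit.  For example (illustrative):
 *
 *	printf("flags = %b\n", (int)zone->uz_flags, PRINT_UMA_ZFLAGS);
 *
 * would render UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL as
 * "flags = 30000000<INTERNAL,BUCKET>".
 */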

/*
 * Hash table for freed address -> slab translation.
 *
 * Only zones with memory not touchable by the allocator use the
 * hash table.  Otherwise slabs are found with vtoslab().
 */
#define	UMA_HASH_SIZE_INIT	32

#define	UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define	UMA_HASH_INSERT(h, s, mem)					\
	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], slab_tohashslab(s), uhs_hlink)

#define	UMA_HASH_REMOVE(h, s)						\
	LIST_REMOVE(slab_tohashslab(s), uhs_hlink)

LIST_HEAD(slabhashhead, uma_hash_slab);

struct uma_hash {
	struct slabhashhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align field or structure to cache 'sector' in Intel terminology.  This
 * is more efficient with adjacent line prefetch.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define	UMA_SUPER_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define	UMA_SUPER_ALIGN	CACHE_LINE_SIZE
#endif

#define	UMA_ALIGN	__aligned(UMA_SUPER_ALIGN)

/*
 * The uma_bucket structure is used to queue and manage buckets divorced
 * from per-cpu caches.  They are loaded into uma_cache_bucket structures
 * for use.
 */
struct uma_bucket {
	TAILQ_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t		ub_cnt;			/* Count of items in bucket. */
	int16_t		ub_entries;		/* Max items. */
	void		*ub_bucket[];		/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

/*
 * The uma_cache_bucket structure is statically allocated on each per-cpu
 * cache.  Its use reduces branches and cache misses in the fast path.
 */
struct uma_cache_bucket {
	uma_bucket_t	ucb_bucket;
	int16_t		ucb_cnt;
	int16_t		ucb_entries;
	uint32_t	ucb_spare;
};

typedef struct uma_cache_bucket * uma_cache_bucket_t;
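
/*
 * A simplified sketch of how the fast path pops an item from one of these
 * buckets, given a uma_cache_bucket_t b loaded from the per-CPU cache (the
 * real code in uma_core.c runs in a critical section and handles the
 * empty-bucket case):
 *
 *	if (b->ucb_cnt != 0)
 *		item = b->ucb_bucket->ub_bucket[--b->ucb_cnt];
 */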

/*
 * The uma_cache structure is allocated for each cpu for every zone
 * type.  This optimizes synchronization out of the allocator fast path.
 */
struct uma_cache {
	struct uma_cache_bucket	uc_freebucket;	/* Bucket we're freeing to */
	struct uma_cache_bucket	uc_allocbucket;	/* Bucket to allocate from */
	struct uma_cache_bucket	uc_crossbucket;	/* cross domain bucket */
	uint64_t		uc_allocs;	/* Count of allocations */
	uint64_t		uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

LIST_HEAD(slabhead, uma_slab);

/*
 * The cache structure pads perfectly into 64 bytes so we use spare
 * bits from the embedded cache buckets to store information from the zone
 * and keep all fast-path allocations accessing a single per-cpu line.
 */
static inline void
cache_set_uz_flags(uma_cache_t cache, uint32_t flags)
{

	cache->uc_freebucket.ucb_spare = flags;
}

static inline void
cache_set_uz_size(uma_cache_t cache, uint32_t size)
{

	cache->uc_allocbucket.ucb_spare = size;
}

static inline uint32_t
cache_uz_flags(uma_cache_t cache)
{

	return (cache->uc_freebucket.ucb_spare);
}

static inline uint32_t
cache_uz_size(uma_cache_t cache)
{

	return (cache->uc_allocbucket.ucb_spare);
}
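
/*
 * For example, the free fast path can decide whether a destructor has to
 * run without touching the zone structure at all (a sketch; the real code
 * in uma_core.c is structured differently):
 *
 *	if ((cache_uz_flags(cache) & UMA_ZFLAG_CTORDTOR) != 0)
 *		... run the destructor before caching the item ...
 */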

/*
 * Per-domain slab lists.  Embedded in the kegs.
 */
struct uma_domain {
	struct mtx_padalign ud_lock;	/* Lock for the domain lists. */
	struct slabhead	ud_part_slab;	/* partially allocated slabs */
	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
	struct slabhead	ud_full_slab;	/* fully allocated slabs */
	uint32_t	ud_pages;	/* Total page count */
	uint32_t	ud_free;	/* Count of items free in slabs */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 */
struct uma_keg {
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg	* uma_keg_t;
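
/*
 * The keg sizing fields relate roughly as follows (an approximation that
 * ignores the embedded slab header and alignment padding):
 *
 *	uk_ipers ~= (uk_ppera * PAGE_SIZE) / uk_rsize
 *
 * i.e. items per slab is the backing allocation size divided by the real
 * (aligned) item size.
 */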

#ifdef _KERNEL
#define	KEG_ASSERT_COLD(k)						\
	KASSERT(uma_keg_get_allocs((k)) == 0,				\
	    ("keg %s initialization after use.", (k)->uk_name))

/*
 * Free bits per-slab.
 */
#define	SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define	SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(noslabbits, 0);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags, see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
	struct noslabbits us_free;		/* Free bitmask, flexible. */
};
_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
    "us_free field must be last");
#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;

/*
 * On INVARIANTS builds, the slab contains a second bitset of the same size,
 * "dbg_bits", which is laid out immediately after us_free.
 */
#ifdef INVARIANTS
#define	SLAB_BITSETS	2
#else
#define	SLAB_BITSETS	1
#endif

/* These three functions are for embedded (!OFFPAGE) use only. */
size_t slab_sizeof(int nitems);
size_t slab_space(int nitems);
int slab_ipers(size_t size, int align);
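
/*
 * Conceptually (a sketch; see uma_core.c for the exact rounding),
 * slab_sizeof() accounts for the fixed header plus SLAB_BITSETS bitsets
 * sized for nitems:
 *
 *	slab_sizeof(nitems) ~= offsetof(struct uma_slab, us_free) +
 *	    SLAB_BITSETS * BITSET_SIZE(nitems)
 */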

/*
 * Slab structure with a full sized bitset and hash link for both
 * HASH and OFFPAGE zones.
 */
struct uma_hash_slab {
	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
	uint8_t			*uhs_data;	/* First item */
	struct uma_slab		uhs_slab;	/* Must be last. */
};

typedef struct uma_hash_slab * uma_hash_slab_t;

static inline uma_hash_slab_t
slab_tohashslab(uma_slab_t slab)
{

	return (__containerof(slab, struct uma_hash_slab, uhs_slab));
}

static inline void *
slab_data(uma_slab_t slab, uma_keg_t keg)
{

	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
	else
		return (slab_tohashslab(slab)->uhs_data);
}

static inline void *
slab_item(uma_slab_t slab, uma_keg_t keg, int index)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return ((void *)(data + keg->uk_rsize * index));
}

static inline int
slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return (((uintptr_t)item - data) / keg->uk_rsize);
}
#endif /* _KERNEL */

TAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets; /* full buckets */
	uma_bucket_t	uzd_cross;	/* Fills from cross buckets. */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_wss;	/* working set size estimate */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_zone_domain * uma_zone_domain_t;
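
/*
 * uzd_imax and uzd_imin track the extremes of uzd_nitems over a trim
 * period, and the working set estimate is derived from their difference,
 * conceptually (a sketch; see uma_core.c for the exact update):
 *
 *	uzd_wss = uzd_imax - uzd_imin;
 *	uzd_imax = uzd_imin = uzd_nitems;	(start the next period)
 */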

/*
 * Zone structure - per memory type.
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	uma_keg_t	uz_keg;		/* This zone's keg if !CACHE */
	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uint64_t	uz_spare0;
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint32_t	uz_sleepers;	/* Threads sleeping on limit */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */

	/* Offset 64, used in bucket replenish. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	void		*uz_spare1;
	uint64_t	uz_bkt_count;	/* Items in bucket cache */
	uint64_t	uz_bkt_max;	/* Maximum bucket cache size */

	/* Offset 128, rare. */
	/*
	 * The lock is placed here to avoid adjacent line prefetcher
	 * in fast paths and to take up space near infrequently accessed
	 * members to reduce alignment overhead.
	 */
	struct mtx	uz_lock;	/* Lock for the zone */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	const char	*uz_name;	/* Text name of the zone */
	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */

	struct mtx_padalign	uz_cross_lock;	/* Cross domain free lock */

	/* Offset 256+, stats and misc. */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint64_t	uz_xdomain;	/* Total number of cross-domain frees */
	volatile uint64_t uz_items;	/* Total items count & sleepers */

	char		*uz_ctlname;	/* sysctl safe name string. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	int		uz_namecnt;	/* duplicate name count. */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[]; /* Per cpu caches */

	/* uz_domain follows here. */
};

/*
 * Macros for interpreting the uz_items field.  20 bits of sleeper count
 * and 44 bits of item count.
 */
#define	UZ_ITEMS_SLEEPER_SHIFT	44LL
#define	UZ_ITEMS_SLEEPERS_MAX	((1 << (64 - UZ_ITEMS_SLEEPER_SHIFT)) - 1)
#define	UZ_ITEMS_COUNT_MASK	((1LL << UZ_ITEMS_SLEEPER_SHIFT) - 1)
#define	UZ_ITEMS_COUNT(x)	((x) & UZ_ITEMS_COUNT_MASK)
#define	UZ_ITEMS_SLEEPERS(x)	((x) >> UZ_ITEMS_SLEEPER_SHIFT)
#define	UZ_ITEMS_SLEEPER	(1LL << UZ_ITEMS_SLEEPER_SHIFT)
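
/*
 * For example, a uz_items value encoding one sleeping thread and 42
 * allocated items is (1ULL << UZ_ITEMS_SLEEPER_SHIFT) + 42, so that:
 *
 *	UZ_ITEMS_COUNT(items)	 == 42
 *	UZ_ITEMS_SLEEPERS(items) == 1
 */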

#define	ZONE_ASSERT_COLD(z)						\
	KASSERT(uma_zone_get_allocs((z)) == 0,				\
	    ("zone %s initialization after use.", (z)->uz_name))

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);

/* Lock Macros */

#define	KEG_LOCKPTR(k, d)	(struct mtx *)&(k)->uk_domain[(d)].ud_lock
#define	KEG_LOCK_INIT(k, d, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define	KEG_LOCK_FINI(k, d)	mtx_destroy(KEG_LOCKPTR(k, d))
#define	KEG_LOCK(k, d)							\
	({ mtx_lock(KEG_LOCKPTR(k, d)); KEG_LOCKPTR(k, d); })
#define	KEG_UNLOCK(k, d)	mtx_unlock(KEG_LOCKPTR(k, d))
#define	KEG_LOCK_ASSERT(k, d)	mtx_assert(KEG_LOCKPTR(k, d), MA_OWNED)

#define	KEG_GET(zone, keg) do {						\
	(keg) = (zone)->uz_keg;						\
	KASSERT((void *)(keg) != (void *)&(zone)->uz_lock,		\
	    ("%s: Invalid zone %p type", __func__, (zone)));		\
	} while (0)

#define	ZONE_LOCK_INIT(z, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,		\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(&(z)->uz_lock, (z)->uz_name,		\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock(&(z)->uz_lock)
#define	ZONE_TRYLOCK(z)	mtx_trylock(&(z)->uz_lock)
#define	ZONE_UNLOCK(z)	mtx_unlock(&(z)->uz_lock)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define	ZONE_LOCK_ASSERT(z)	mtx_assert(&(z)->uz_lock, MA_OWNED)

#define	ZONE_CROSS_LOCK_INIT(z)						\
	mtx_init(&(z)->uz_cross_lock, "UMA Cross", NULL, MTX_DEF)
#define	ZONE_CROSS_LOCK(z)	mtx_lock(&(z)->uz_cross_lock)
#define	ZONE_CROSS_UNLOCK(z)	mtx_unlock(&(z)->uz_cross_lock)
#define	ZONE_CROSS_LOCK_FINI(z)	mtx_destroy(&(z)->uz_cross_lock)

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
		if ((uint8_t *)slab->uhs_data == data)
			return (&slab->uhs_slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}

extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;
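
/*
 * For VTOSLAB zones the free path recovers the owning zone and slab from
 * the vm_page backing the item, roughly (a sketch):
 *
 *	vtozoneslab((vm_offset_t)item & ~UMA_SLAB_MASK, &zone, &slab);
 */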

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */