160727d8bSWarner Losh /*- 2fe267a55SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3fe267a55SPedro F. Giffuni * 4584061b4SJeff Roberson * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org> 508ecce74SRobert Watson * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org> 608ecce74SRobert Watson * All rights reserved. 78355f576SJeff Roberson * 88355f576SJeff Roberson * Redistribution and use in source and binary forms, with or without 98355f576SJeff Roberson * modification, are permitted provided that the following conditions 108355f576SJeff Roberson * are met: 118355f576SJeff Roberson * 1. Redistributions of source code must retain the above copyright 128355f576SJeff Roberson * notice unmodified, this list of conditions, and the following 138355f576SJeff Roberson * disclaimer. 148355f576SJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright 158355f576SJeff Roberson * notice, this list of conditions and the following disclaimer in the 168355f576SJeff Roberson * documentation and/or other materials provided with the distribution. 178355f576SJeff Roberson * 188355f576SJeff Roberson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 198355f576SJeff Roberson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 208355f576SJeff Roberson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 218355f576SJeff Roberson * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 228355f576SJeff Roberson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 238355f576SJeff Roberson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 248355f576SJeff Roberson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 258355f576SJeff Roberson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 268355f576SJeff Roberson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 278355f576SJeff Roberson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 288355f576SJeff Roberson * 298355f576SJeff Roberson * $FreeBSD$ 308355f576SJeff Roberson * 318355f576SJeff Roberson */ 328355f576SJeff Roberson 3339669415SGleb Smirnoff #include <sys/counter.h> 34e04223bfSMark Johnston #include <sys/_bitset.h> 357571e249SMark Johnston #include <sys/_domainset.h> 36b28cc462SGleb Smirnoff #include <sys/_task.h> 37b28cc462SGleb Smirnoff 388355f576SJeff Roberson /* 398355f576SJeff Roberson * This file includes definitions, structures, prototypes, and inlines that 408355f576SJeff Roberson * should not be used outside of the actual implementation of UMA. 418355f576SJeff Roberson */ 428355f576SJeff Roberson 438355f576SJeff Roberson /* 44ab3185d1SJeff Roberson * The brief summary; Zones describe unique allocation types. Zones are 45ab3185d1SJeff Roberson * organized into per-CPU caches which are filled by buckets. Buckets are 46ab3185d1SJeff Roberson * organized according to memory domains. Buckets are filled from kegs which 47ab3185d1SJeff Roberson * are also organized according to memory domains. Kegs describe a unique 48ab3185d1SJeff Roberson * allocation type, backend memory provider, and layout. Kegs are associated 49ab3185d1SJeff Roberson * with one or more zones and zones reference one or more kegs. Kegs provide 50ab3185d1SJeff Roberson * slabs which are virtually contiguous collections of pages. Each slab is 51ab3185d1SJeff Roberson * broken down int one or more items that will satisfy an individual allocation. 52ab3185d1SJeff Roberson * 53ab3185d1SJeff Roberson * Allocation is satisfied in the following order: 54ab3185d1SJeff Roberson * 1) Per-CPU cache 55ab3185d1SJeff Roberson * 2) Per-domain cache of buckets 56ab3185d1SJeff Roberson * 3) Slab from any of N kegs 57ab3185d1SJeff Roberson * 4) Backend page provider 58ab3185d1SJeff Roberson * 59ab3185d1SJeff Roberson * More detail on individual objects is contained below: 608355f576SJeff Roberson * 61099a0e58SBosko Milekic * Kegs contain lists of slabs which are stored in either the full bin, empty 628355f576SJeff Roberson * bin, or partially allocated bin, to reduce fragmentation. They also contain 638355f576SJeff Roberson * the user supplied value for size, which is adjusted for alignment purposes 64099a0e58SBosko Milekic * and rsize is the result of that. The Keg also stores information for 658355f576SJeff Roberson * managing a hash of page addresses that maps pages to uma_slab_t structures 668355f576SJeff Roberson * for pages that don't have embedded uma_slab_t's. 678355f576SJeff Roberson * 68ab3185d1SJeff Roberson * Keg slab lists are organized by memory domain to support NUMA allocation 69ab3185d1SJeff Roberson * policies. By default allocations are spread across domains to reduce the 70ab3185d1SJeff Roberson * potential for hotspots. Special keg creation flags may be specified to 71ab3185d1SJeff Roberson * prefer location allocation. However there is no strict enforcement as frees 72ab3185d1SJeff Roberson * may happen on any CPU and these are returned to the CPU-local cache 73ab3185d1SJeff Roberson * regardless of the originating domain. 74ab3185d1SJeff Roberson * 758355f576SJeff Roberson * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may 768355f576SJeff Roberson * be allocated off the page from a special slab zone. The free list within a 77ef72505eSJeff Roberson * slab is managed with a bitmask. For item sizes that would yield more than 78ef72505eSJeff Roberson * 10% memory waste we potentially allocate a separate uma_slab_t if this will 79ef72505eSJeff Roberson * improve the number of items per slab that will fit. 808355f576SJeff Roberson * 818355f576SJeff Roberson * The only really gross cases, with regards to memory waste, are for those 828355f576SJeff Roberson * items that are just over half the page size. You can get nearly 50% waste, 838355f576SJeff Roberson * so you fall back to the memory footprint of the power of two allocator. I 848355f576SJeff Roberson * have looked at memory allocation sizes on many of the machines available to 858355f576SJeff Roberson * me, and there does not seem to be an abundance of allocations at this range 868355f576SJeff Roberson * so at this time it may not make sense to optimize for it. This can, of 878355f576SJeff Roberson * course, be solved with dynamic slab sizes. 888355f576SJeff Roberson * 89099a0e58SBosko Milekic * Kegs may serve multiple Zones but by far most of the time they only serve 90099a0e58SBosko Milekic * one. When a Zone is created, a Keg is allocated and setup for it. While 91099a0e58SBosko Milekic * the backing Keg stores slabs, the Zone caches Buckets of items allocated 92099a0e58SBosko Milekic * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor 93099a0e58SBosko Milekic * pair, as well as with its own set of small per-CPU caches, layered above 94099a0e58SBosko Milekic * the Zone's general Bucket cache. 95099a0e58SBosko Milekic * 966ab3b958SRobert Watson * The PCPU caches are protected by critical sections, and may be accessed 976ab3b958SRobert Watson * safely only from their associated CPU, while the Zones backed by the same 986ab3b958SRobert Watson * Keg all share a common Keg lock (to coalesce contention on the backing 996ab3b958SRobert Watson * slabs). The backing Keg typically only serves one Zone but in the case of 1006ab3b958SRobert Watson * multiple Zones, one of the Zones is considered the Master Zone and all 1016ab3b958SRobert Watson * Zone-related stats from the Keg are done in the Master Zone. For an 1026ab3b958SRobert Watson * example of a Multi-Zone setup, refer to the Mbuf allocation code. 1038355f576SJeff Roberson */ 1048355f576SJeff Roberson 1058355f576SJeff Roberson /* 1068355f576SJeff Roberson * This is the representation for normal (Non OFFPAGE slab) 1078355f576SJeff Roberson * 1088355f576SJeff Roberson * i == item 1098355f576SJeff Roberson * s == slab pointer 1108355f576SJeff Roberson * 1118355f576SJeff Roberson * <---------------- Page (UMA_SLAB_SIZE) ------------------> 1128355f576SJeff Roberson * ___________________________________________________________ 1138355f576SJeff Roberson * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ | 1148355f576SJeff Roberson * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header|| 1158355f576SJeff Roberson * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________|| 1168355f576SJeff Roberson * |___________________________________________________________| 1178355f576SJeff Roberson * 1188355f576SJeff Roberson * 1198355f576SJeff Roberson * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE. 1208355f576SJeff Roberson * 1218355f576SJeff Roberson * ___________________________________________________________ 1228355f576SJeff Roberson * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ | 1238355f576SJeff Roberson * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| | 1248355f576SJeff Roberson * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| | 1258355f576SJeff Roberson * |___________________________________________________________| 1268355f576SJeff Roberson * ___________ ^ 1278355f576SJeff Roberson * |slab header| | 1288355f576SJeff Roberson * |___________|---* 1298355f576SJeff Roberson * 1308355f576SJeff Roberson */ 1318355f576SJeff Roberson 1328355f576SJeff Roberson #ifndef VM_UMA_INT_H 1338355f576SJeff Roberson #define VM_UMA_INT_H 1348355f576SJeff Roberson 1358355f576SJeff Roberson #define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */ 1368355f576SJeff Roberson #define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */ 1378355f576SJeff Roberson #define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits PAGE_MASK */ 1388355f576SJeff Roberson 139ad97af7eSGleb Smirnoff /* Max waste percentage before going to off page slab management */ 140ad97af7eSGleb Smirnoff #define UMA_MAX_WASTE 10 1418355f576SJeff Roberson 1428355f576SJeff Roberson 1438355f576SJeff Roberson /* 1441e0701e1SJeff Roberson * Hash table for freed address -> slab translation. 1451e0701e1SJeff Roberson * 1461e0701e1SJeff Roberson * Only zones with memory not touchable by the allocator use the 1471e0701e1SJeff Roberson * hash table. Otherwise slabs are found with vtoslab(). 1488355f576SJeff Roberson */ 1491e0701e1SJeff Roberson #define UMA_HASH_SIZE_INIT 32 1508355f576SJeff Roberson 151ef72505eSJeff Roberson #define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask) 1528355f576SJeff Roberson 1538355f576SJeff Roberson #define UMA_HASH_INSERT(h, s, mem) \ 1541e0701e1SJeff Roberson LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \ 1551e0701e1SJeff Roberson (mem))], (uma_hash_slab_t)(s), uhs_hlink) 1568355f576SJeff Roberson 1571e0701e1SJeff Roberson #define UMA_HASH_REMOVE(h, s) \ 1581e0701e1SJeff Roberson LIST_REMOVE((uma_hash_slab_t)(s), uhs_hlink) 1598355f576SJeff Roberson 1601e0701e1SJeff Roberson LIST_HEAD(slabhashhead, uma_hash_slab); 1618355f576SJeff Roberson 1628355f576SJeff Roberson struct uma_hash { 1631e0701e1SJeff Roberson struct slabhashhead *uh_slab_hash; /* Hash table for slabs */ 1646929b7d1SPedro F. Giffuni u_int uh_hashsize; /* Current size of the hash table */ 1656929b7d1SPedro F. Giffuni u_int uh_hashmask; /* Mask used during hashing */ 1668355f576SJeff Roberson }; 1678355f576SJeff Roberson 1688355f576SJeff Roberson /* 1695e4bb93cSKip Macy * align field or structure to cache line 1705e4bb93cSKip Macy */ 17112f69195SJustin Hibbits #if defined(__amd64__) || defined(__powerpc64__) 172782e38aaSMateusz Guzik #define UMA_ALIGN __aligned(128) 1731a23373cSKip Macy #else 174376b1ba3SJeff Roberson #define UMA_ALIGN __aligned(CACHE_LINE_SIZE) 1751a23373cSKip Macy #endif 1765e4bb93cSKip Macy 1775e4bb93cSKip Macy /* 178376b1ba3SJeff Roberson * The uma_bucket structure is used to queue and manage buckets divorced 179376b1ba3SJeff Roberson * from per-cpu caches. They are loaded into uma_cache_bucket structures 180376b1ba3SJeff Roberson * for use. 1818355f576SJeff Roberson */ 1828355f576SJeff Roberson struct uma_bucket { 18308cfa56eSMark Johnston TAILQ_ENTRY(uma_bucket) ub_link; /* Link into the zone */ 184306abf0fSGleb Smirnoff int16_t ub_cnt; /* Count of items in bucket. */ 185cae33c14SJeff Roberson int16_t ub_entries; /* Max items. */ 186cae33c14SJeff Roberson void *ub_bucket[]; /* actual allocation storage */ 1871a23373cSKip Macy }; 1888355f576SJeff Roberson 1898355f576SJeff Roberson typedef struct uma_bucket * uma_bucket_t; 1908355f576SJeff Roberson 191376b1ba3SJeff Roberson /* 192376b1ba3SJeff Roberson * The uma_cache_bucket structure is statically allocated on each per-cpu 193376b1ba3SJeff Roberson * cache. Its use reduces branches and cache misses in the fast path. 194376b1ba3SJeff Roberson */ 195376b1ba3SJeff Roberson struct uma_cache_bucket { 196376b1ba3SJeff Roberson uma_bucket_t ucb_bucket; 197376b1ba3SJeff Roberson int16_t ucb_cnt; 198376b1ba3SJeff Roberson int16_t ucb_entries; 199376b1ba3SJeff Roberson uint32_t ucb_spare; 200376b1ba3SJeff Roberson }; 201376b1ba3SJeff Roberson 202376b1ba3SJeff Roberson typedef struct uma_cache_bucket * uma_cache_bucket_t; 203376b1ba3SJeff Roberson 204376b1ba3SJeff Roberson /* 205376b1ba3SJeff Roberson * The uma_cache structure is allocated for each cpu for every zone 206376b1ba3SJeff Roberson * type. This optimizes synchronization out of the allocator fast path. 207376b1ba3SJeff Roberson */ 2088355f576SJeff Roberson struct uma_cache { 209376b1ba3SJeff Roberson struct uma_cache_bucket uc_freebucket; /* Bucket we're freeing to */ 210376b1ba3SJeff Roberson struct uma_cache_bucket uc_allocbucket; /* Bucket to allocate from */ 211376b1ba3SJeff Roberson struct uma_cache_bucket uc_crossbucket; /* cross domain bucket */ 21285dcf349SGleb Smirnoff uint64_t uc_allocs; /* Count of allocations */ 21385dcf349SGleb Smirnoff uint64_t uc_frees; /* Count of frees */ 2145e4bb93cSKip Macy } UMA_ALIGN; 2158355f576SJeff Roberson 2168355f576SJeff Roberson typedef struct uma_cache * uma_cache_t; 2178355f576SJeff Roberson 2181e0701e1SJeff Roberson LIST_HEAD(slabhead, uma_slab); 2191e0701e1SJeff Roberson 2208355f576SJeff Roberson /* 221cc7ce83aSJeff Roberson * The cache structure pads perfectly into 64 bytes so we use spare 222cc7ce83aSJeff Roberson * bits from the embedded cache buckets to store information from the zone 223cc7ce83aSJeff Roberson * and keep all fast-path allocations accessing a single per-cpu line. 224cc7ce83aSJeff Roberson */ 225cc7ce83aSJeff Roberson static inline void 226cc7ce83aSJeff Roberson cache_set_uz_flags(uma_cache_t cache, uint32_t flags) 227cc7ce83aSJeff Roberson { 228cc7ce83aSJeff Roberson 229cc7ce83aSJeff Roberson cache->uc_freebucket.ucb_spare = flags; 230cc7ce83aSJeff Roberson } 231cc7ce83aSJeff Roberson 232cc7ce83aSJeff Roberson static inline void 233cc7ce83aSJeff Roberson cache_set_uz_size(uma_cache_t cache, uint32_t size) 234cc7ce83aSJeff Roberson { 235cc7ce83aSJeff Roberson 236cc7ce83aSJeff Roberson cache->uc_allocbucket.ucb_spare = size; 237cc7ce83aSJeff Roberson } 238cc7ce83aSJeff Roberson 239cc7ce83aSJeff Roberson static inline uint32_t 240cc7ce83aSJeff Roberson cache_uz_flags(uma_cache_t cache) 241cc7ce83aSJeff Roberson { 242cc7ce83aSJeff Roberson 243cc7ce83aSJeff Roberson return (cache->uc_freebucket.ucb_spare); 244cc7ce83aSJeff Roberson } 245cc7ce83aSJeff Roberson 246cc7ce83aSJeff Roberson static inline uint32_t 247cc7ce83aSJeff Roberson cache_uz_size(uma_cache_t cache) 248cc7ce83aSJeff Roberson { 249cc7ce83aSJeff Roberson 250cc7ce83aSJeff Roberson return (cache->uc_allocbucket.ucb_spare); 251cc7ce83aSJeff Roberson } 252cc7ce83aSJeff Roberson 253cc7ce83aSJeff Roberson /* 254376b1ba3SJeff Roberson * Per-domain slab lists. Embedded in the kegs. 255ab3185d1SJeff Roberson */ 256ab3185d1SJeff Roberson struct uma_domain { 2571e0701e1SJeff Roberson struct slabhead ud_part_slab; /* partially allocated slabs */ 2581e0701e1SJeff Roberson struct slabhead ud_free_slab; /* completely unallocated slabs */ 2591e0701e1SJeff Roberson struct slabhead ud_full_slab; /* fully allocated slabs */ 260ab3185d1SJeff Roberson }; 261ab3185d1SJeff Roberson 262ab3185d1SJeff Roberson typedef struct uma_domain * uma_domain_t; 263ab3185d1SJeff Roberson 264ab3185d1SJeff Roberson /* 265099a0e58SBosko Milekic * Keg management structure 266099a0e58SBosko Milekic * 267099a0e58SBosko Milekic * TODO: Optimize for cache line size 268099a0e58SBosko Milekic * 269099a0e58SBosko Milekic */ 270099a0e58SBosko Milekic struct uma_keg { 271bb15d1c7SGleb Smirnoff struct mtx uk_lock; /* Lock for the keg must be first. 272bb15d1c7SGleb Smirnoff * See shared uz_keg/uz_lockptr 273bb15d1c7SGleb Smirnoff * member of struct uma_zone. */ 274099a0e58SBosko Milekic struct uma_hash uk_hash; 275099a0e58SBosko Milekic LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */ 276099a0e58SBosko Milekic 277194a979eSMark Johnston struct domainset_ref uk_dr; /* Domain selection policy. */ 27885dcf349SGleb Smirnoff uint32_t uk_align; /* Alignment mask */ 27985dcf349SGleb Smirnoff uint32_t uk_pages; /* Total page count */ 28085dcf349SGleb Smirnoff uint32_t uk_free; /* Count of items free in slabs */ 2816fd34d6fSJeff Roberson uint32_t uk_reserve; /* Number of reserved items. */ 28285dcf349SGleb Smirnoff uint32_t uk_size; /* Requested size of each item */ 28385dcf349SGleb Smirnoff uint32_t uk_rsize; /* Real size of each item */ 284099a0e58SBosko Milekic 285099a0e58SBosko Milekic uma_init uk_init; /* Keg's init routine */ 286099a0e58SBosko Milekic uma_fini uk_fini; /* Keg's fini routine */ 287099a0e58SBosko Milekic uma_alloc uk_allocf; /* Allocation function */ 288099a0e58SBosko Milekic uma_free uk_freef; /* Free routine */ 289099a0e58SBosko Milekic 290a4915c21SAttilio Rao u_long uk_offset; /* Next free offset from base KVA */ 291a4915c21SAttilio Rao vm_offset_t uk_kva; /* Zone base KVA */ 292099a0e58SBosko Milekic uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */ 293099a0e58SBosko Milekic 2942d54d4bbSMark Johnston uint32_t uk_pgoff; /* Offset to uma_slab struct */ 29585dcf349SGleb Smirnoff uint16_t uk_ppera; /* pages per allocation from backend */ 29685dcf349SGleb Smirnoff uint16_t uk_ipers; /* Items per slab */ 29785dcf349SGleb Smirnoff uint32_t uk_flags; /* Internal flags */ 298ad97af7eSGleb Smirnoff 299ad97af7eSGleb Smirnoff /* Least used fields go to the last cache line. */ 300ad97af7eSGleb Smirnoff const char *uk_name; /* Name of creating zone. */ 301ad97af7eSGleb Smirnoff LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */ 302ab3185d1SJeff Roberson 303ab3185d1SJeff Roberson /* Must be last, variable sized. */ 304ab3185d1SJeff Roberson struct uma_domain uk_domain[]; /* Keg's slab lists. */ 305099a0e58SBosko Milekic }; 306099a0e58SBosko Milekic typedef struct uma_keg * uma_keg_t; 307099a0e58SBosko Milekic 308815db204SRyan Libby #ifdef _KERNEL 309ef72505eSJeff Roberson /* 310ef72505eSJeff Roberson * Free bits per-slab. 311ef72505eSJeff Roberson */ 3129b78b1f4SJeff Roberson #define SLAB_MAX_SETSIZE (PAGE_SIZE / UMA_SMALLEST_UNIT) 3139b78b1f4SJeff Roberson #define SLAB_MIN_SETSIZE _BITSET_BITS 3149b78b1f4SJeff Roberson BITSET_DEFINE(slabbits, SLAB_MAX_SETSIZE); 3159b78b1f4SJeff Roberson BITSET_DEFINE(noslabbits, 0); 316099a0e58SBosko Milekic 317ef72505eSJeff Roberson /* 318ef72505eSJeff Roberson * The slab structure manages a single contiguous allocation from backing 319ef72505eSJeff Roberson * store and subdivides it into individually allocatable items. 320ef72505eSJeff Roberson */ 321ef72505eSJeff Roberson struct uma_slab { 3226d6a03d7SJeff Roberson LIST_ENTRY(uma_slab) us_link; /* slabs in zone */ 32385dcf349SGleb Smirnoff uint16_t us_freecount; /* How many are free? */ 32485dcf349SGleb Smirnoff uint8_t us_flags; /* Page flags see uma.h */ 325ab3185d1SJeff Roberson uint8_t us_domain; /* Backing NUMA domain. */ 326815db204SRyan Libby struct noslabbits us_free; /* Free bitmask, flexible. */ 327099a0e58SBosko Milekic }; 328815db204SRyan Libby _Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free), 329815db204SRyan Libby "us_free field must be last"); 330ab3185d1SJeff Roberson #if MAXMEMDOM >= 255 331ab3185d1SJeff Roberson #error "Slab domain type insufficient" 332ab3185d1SJeff Roberson #endif 333ab3185d1SJeff Roberson 334099a0e58SBosko Milekic typedef struct uma_slab * uma_slab_t; 335e20a199fSJeff Roberson 336815db204SRyan Libby /* 337815db204SRyan Libby * On INVARIANTS builds, the slab contains a second bitset of the same size, 338815db204SRyan Libby * "dbg_bits", which is laid out immediately after us_free. 339815db204SRyan Libby */ 340815db204SRyan Libby #ifdef INVARIANTS 341815db204SRyan Libby #define SLAB_BITSETS 2 342815db204SRyan Libby #else 343815db204SRyan Libby #define SLAB_BITSETS 1 344815db204SRyan Libby #endif 345815db204SRyan Libby 3469b78b1f4SJeff Roberson /* These three functions are for embedded (!OFFPAGE) use only. */ 3479b78b1f4SJeff Roberson size_t slab_sizeof(int nitems); 3489b78b1f4SJeff Roberson size_t slab_space(int nitems); 3499b78b1f4SJeff Roberson int slab_ipers(size_t size, int align); 3509b78b1f4SJeff Roberson 3511e0701e1SJeff Roberson /* 3521e0701e1SJeff Roberson * Slab structure with a full sized bitset and hash link for both 3531e0701e1SJeff Roberson * HASH and OFFPAGE zones. 3541e0701e1SJeff Roberson */ 3551e0701e1SJeff Roberson struct uma_hash_slab { 3561e0701e1SJeff Roberson struct uma_slab uhs_slab; /* Must be first. */ 357815db204SRyan Libby struct slabbits uhs_bits1; /* Must be second. */ 358815db204SRyan Libby #ifdef INVARIANTS 359815db204SRyan Libby struct slabbits uhs_bits2; /* Must be third. */ 360815db204SRyan Libby #endif 3611e0701e1SJeff Roberson LIST_ENTRY(uma_hash_slab) uhs_hlink; /* Link for hash table */ 3621e0701e1SJeff Roberson uint8_t *uhs_data; /* First item */ 3631e0701e1SJeff Roberson }; 3641e0701e1SJeff Roberson 3651e0701e1SJeff Roberson typedef struct uma_hash_slab * uma_hash_slab_t; 3661e0701e1SJeff Roberson 3671e0701e1SJeff Roberson static inline void * 3681e0701e1SJeff Roberson slab_data(uma_slab_t slab, uma_keg_t keg) 3691e0701e1SJeff Roberson { 3701e0701e1SJeff Roberson 3711e0701e1SJeff Roberson if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) 3721e0701e1SJeff Roberson return ((void *)((uintptr_t)slab - keg->uk_pgoff)); 3731e0701e1SJeff Roberson else 3741e0701e1SJeff Roberson return (((uma_hash_slab_t)slab)->uhs_data); 3751e0701e1SJeff Roberson } 3761e0701e1SJeff Roberson 3771e0701e1SJeff Roberson static inline void * 3781e0701e1SJeff Roberson slab_item(uma_slab_t slab, uma_keg_t keg, int index) 3791e0701e1SJeff Roberson { 3801e0701e1SJeff Roberson uintptr_t data; 3811e0701e1SJeff Roberson 3821e0701e1SJeff Roberson data = (uintptr_t)slab_data(slab, keg); 3831e0701e1SJeff Roberson return ((void *)(data + keg->uk_rsize * index)); 3841e0701e1SJeff Roberson } 3851e0701e1SJeff Roberson 3861e0701e1SJeff Roberson static inline int 3871e0701e1SJeff Roberson slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item) 3881e0701e1SJeff Roberson { 3891e0701e1SJeff Roberson uintptr_t data; 3901e0701e1SJeff Roberson 3911e0701e1SJeff Roberson data = (uintptr_t)slab_data(slab, keg); 3921e0701e1SJeff Roberson return (((uintptr_t)item - data) / keg->uk_rsize); 3931e0701e1SJeff Roberson } 394815db204SRyan Libby #endif /* _KERNEL */ 3951e0701e1SJeff Roberson 39608cfa56eSMark Johnston TAILQ_HEAD(uma_bucketlist, uma_bucket); 39708cfa56eSMark Johnston 398ab3185d1SJeff Roberson struct uma_zone_domain { 39908cfa56eSMark Johnston struct uma_bucketlist uzd_buckets; /* full buckets */ 4000f9b7bf3SMark Johnston long uzd_nitems; /* total item count */ 4010f9b7bf3SMark Johnston long uzd_imax; /* maximum item count this period */ 4020f9b7bf3SMark Johnston long uzd_imin; /* minimum item count this period */ 4030f9b7bf3SMark Johnston long uzd_wss; /* working set size estimate */ 404ab3185d1SJeff Roberson }; 405ab3185d1SJeff Roberson 406ab3185d1SJeff Roberson typedef struct uma_zone_domain * uma_zone_domain_t; 407ab3185d1SJeff Roberson 408244f4554SBosko Milekic /* 409*4bd61e19SJeff Roberson * Zone structure - per memory type. 4108355f576SJeff Roberson */ 4118355f576SJeff Roberson struct uma_zone { 41263b5557bSJeff Roberson /* Offset 0, used in alloc/free fast/medium fast path and const. */ 413bb15d1c7SGleb Smirnoff union { 414bb15d1c7SGleb Smirnoff uma_keg_t uz_keg; /* This zone's keg */ 415bb15d1c7SGleb Smirnoff struct mtx *uz_lockptr; /* To keg or to self */ 416bb15d1c7SGleb Smirnoff }; 417ab3185d1SJeff Roberson struct uma_zone_domain *uz_domain; /* per-domain buckets */ 41863b5557bSJeff Roberson uint32_t uz_flags; /* Flags inherited from kegs */ 41963b5557bSJeff Roberson uint32_t uz_size; /* Size inherited from kegs */ 4208355f576SJeff Roberson uma_ctor uz_ctor; /* Constructor for each allocation */ 4218355f576SJeff Roberson uma_dtor uz_dtor; /* Destructor */ 422*4bd61e19SJeff Roberson uint64_t uz_spare0; 423bb15d1c7SGleb Smirnoff uint64_t uz_max_items; /* Maximum number of items to alloc */ 424*4bd61e19SJeff Roberson uint32_t uz_sleepers; /* Threads sleeping on limit */ 42520a4e154SJeff Roberson uint16_t uz_bucket_size; /* Number of items in full bucket */ 42620a4e154SJeff Roberson uint16_t uz_bucket_size_max; /* Maximum number of bucket items */ 42763b5557bSJeff Roberson 42863b5557bSJeff Roberson /* Offset 64, used in bucket replenish. */ 4290095a784SJeff Roberson uma_import uz_import; /* Import new memory to cache. */ 4300095a784SJeff Roberson uma_release uz_release; /* Release memory from cache. */ 4310095a784SJeff Roberson void *uz_arg; /* Import/release argument. */ 432bb15d1c7SGleb Smirnoff uma_init uz_init; /* Initializer for each item */ 433bb15d1c7SGleb Smirnoff uma_fini uz_fini; /* Finalizer for each item. */ 434*4bd61e19SJeff Roberson void *uz_spare1; 435bb15d1c7SGleb Smirnoff uint64_t uz_bkt_count; /* Items in bucket cache */ 436bb15d1c7SGleb Smirnoff uint64_t uz_bkt_max; /* Maximum bucket cache size */ 437099a0e58SBosko Milekic 43863b5557bSJeff Roberson /* Offset 128 Rare. */ 43963b5557bSJeff Roberson /* 44063b5557bSJeff Roberson * The lock is placed here to avoid adjacent line prefetcher 44163b5557bSJeff Roberson * in fast paths and to take up space near infrequently accessed 44263b5557bSJeff Roberson * members to reduce alignment overhead. 44363b5557bSJeff Roberson */ 44463b5557bSJeff Roberson struct mtx uz_lock; /* Lock for the zone */ 445bb15d1c7SGleb Smirnoff LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */ 446bb15d1c7SGleb Smirnoff const char *uz_name; /* Text name of the zone */ 44763b5557bSJeff Roberson /* The next two fields are used to print a rate-limited warnings. */ 44863b5557bSJeff Roberson const char *uz_warning; /* Warning to print on failure */ 44963b5557bSJeff Roberson struct timeval uz_ratecheck; /* Warnings rate-limiting */ 45063b5557bSJeff Roberson struct task uz_maxaction; /* Task to run when at limit */ 45120a4e154SJeff Roberson uint16_t uz_bucket_size_min; /* Min number of items in bucket */ 4525e4bb93cSKip Macy 45320a4e154SJeff Roberson /* Offset 256+, stats and misc. */ 4542efcc8cbSGleb Smirnoff counter_u64_t uz_allocs; /* Total number of allocations */ 4552efcc8cbSGleb Smirnoff counter_u64_t uz_frees; /* Total number of frees */ 4562efcc8cbSGleb Smirnoff counter_u64_t uz_fails; /* Total number of alloc failures */ 45785dcf349SGleb Smirnoff uint64_t uz_sleeps; /* Total number of alloc sleeps */ 458c1685086SJeff Roberson uint64_t uz_xdomain; /* Total number of cross-domain frees */ 459*4bd61e19SJeff Roberson volatile uint64_t uz_items; /* Total items count & sleepers */ 460*4bd61e19SJeff Roberson 46120a4e154SJeff Roberson char *uz_ctlname; /* sysctl safe name string. */ 46220a4e154SJeff Roberson struct sysctl_oid *uz_oid; /* sysctl oid pointer. */ 46320a4e154SJeff Roberson int uz_namecnt; /* duplicate name count. */ 46454503a13SJonathan T. Looney 4658355f576SJeff Roberson /* 4668355f576SJeff Roberson * This HAS to be the last item because we adjust the zone size 4678355f576SJeff Roberson * based on NCPU and then allocate the space for the zones. 4688355f576SJeff Roberson */ 469ab3185d1SJeff Roberson struct uma_cache uz_cpu[]; /* Per cpu caches */ 470ab3185d1SJeff Roberson 471ab3185d1SJeff Roberson /* uz_domain follows here. */ 4728355f576SJeff Roberson }; 4738355f576SJeff Roberson 474b60f5b79SJeff Roberson /* 475b60f5b79SJeff Roberson * These flags must not overlap with the UMA_ZONE flags specified in uma.h. 476b60f5b79SJeff Roberson */ 477cc7ce83aSJeff Roberson #define UMA_ZFLAG_CTORDTOR 0x01000000 /* Zone has ctor/dtor set. */ 478cc7ce83aSJeff Roberson #define UMA_ZFLAG_LIMIT 0x02000000 /* Zone has limit set. */ 479bb15d1c7SGleb Smirnoff #define UMA_ZFLAG_CACHE 0x04000000 /* uma_zcache_create()d it */ 48008cfa56eSMark Johnston #define UMA_ZFLAG_RECLAIMING 0x08000000 /* Running zone_reclaim(). */ 4816fd34d6fSJeff Roberson #define UMA_ZFLAG_BUCKET 0x10000000 /* Bucket zone. */ 4822018f30cSMike Silbersack #define UMA_ZFLAG_INTERNAL 0x20000000 /* No offpage no PCPU. */ 483ca293436SRyan Libby #define UMA_ZFLAG_TRASH 0x40000000 /* Add trash ctor/dtor. */ 4842018f30cSMike Silbersack #define UMA_ZFLAG_CACHEONLY 0x80000000 /* Don't ask VM for buckets. */ 4858355f576SJeff Roberson 4866fd34d6fSJeff Roberson #define UMA_ZFLAG_INHERIT \ 4876fd34d6fSJeff Roberson (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET) 488e20a199fSJeff Roberson 4896d204a6aSRyan Libby #define PRINT_UMA_ZFLAGS "\20" \ 4906d204a6aSRyan Libby "\40CACHEONLY" \ 4916d204a6aSRyan Libby "\37TRASH" \ 4926d204a6aSRyan Libby "\36INTERNAL" \ 4936d204a6aSRyan Libby "\35BUCKET" \ 4946d204a6aSRyan Libby "\34RECLAIMING" \ 4956d204a6aSRyan Libby "\33CACHE" \ 496cc7ce83aSJeff Roberson "\32LIMIT" \ 497cc7ce83aSJeff Roberson "\31CTORDTOR" \ 4986d204a6aSRyan Libby "\22MINBUCKET" \ 4996d204a6aSRyan Libby "\21NUMA" \ 5006d204a6aSRyan Libby "\20PCPU" \ 5016d204a6aSRyan Libby "\17NODUMP" \ 5026d204a6aSRyan Libby "\16VTOSLAB" \ 5036d204a6aSRyan Libby "\15CACHESPREAD" \ 5046d204a6aSRyan Libby "\14MAXBUCKET" \ 5056d204a6aSRyan Libby "\13NOBUCKET" \ 5066d204a6aSRyan Libby "\12SECONDARY" \ 5076d204a6aSRyan Libby "\11HASH" \ 5086d204a6aSRyan Libby "\10VM" \ 5096d204a6aSRyan Libby "\7MTXCLASS" \ 5106d204a6aSRyan Libby "\6NOFREE" \ 5116d204a6aSRyan Libby "\5MALLOC" \ 5126d204a6aSRyan Libby "\4OFFPAGE" \ 5136d204a6aSRyan Libby "\3STATIC" \ 5146d204a6aSRyan Libby "\2ZINIT" \ 5156d204a6aSRyan Libby "\1PAGEABLE" 5166d204a6aSRyan Libby 517*4bd61e19SJeff Roberson /* 518*4bd61e19SJeff Roberson * Macros for interpreting the uz_items field. 20 bits of sleeper count 519*4bd61e19SJeff Roberson * and 44 bit of item count. 520*4bd61e19SJeff Roberson */ 521*4bd61e19SJeff Roberson #define UZ_ITEMS_SLEEPER_SHIFT 44LL 522*4bd61e19SJeff Roberson #define UZ_ITEMS_SLEEPERS_MAX ((1 << (64 - UZ_ITEMS_SLEEPER_SHIFT)) - 1) 523*4bd61e19SJeff Roberson #define UZ_ITEMS_COUNT_MASK ((1LL << UZ_ITEMS_SLEEPER_SHIFT) - 1) 524*4bd61e19SJeff Roberson #define UZ_ITEMS_COUNT(x) ((x) & UZ_ITEMS_COUNT_MASK) 525*4bd61e19SJeff Roberson #define UZ_ITEMS_SLEEPERS(x) ((x) >> UZ_ITEMS_SLEEPER_SHIFT) 526*4bd61e19SJeff Roberson #define UZ_ITEMS_SLEEPER (1LL << UZ_ITEMS_SLEEPER_SHIFT) 527*4bd61e19SJeff Roberson 5285e4bb93cSKip Macy #undef UMA_ALIGN 5295e4bb93cSKip Macy 530af17e9a9SRobert Watson #ifdef _KERNEL 5318355f576SJeff Roberson /* Internal prototypes */ 53285dcf349SGleb Smirnoff static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data); 5338355f576SJeff Roberson 5348355f576SJeff Roberson /* Lock Macros */ 5358355f576SJeff Roberson 536e20a199fSJeff Roberson #define KEG_LOCK_INIT(k, lc) \ 53728bc4419SJeff Roberson do { \ 53828bc4419SJeff Roberson if ((lc)) \ 539e20a199fSJeff Roberson mtx_init(&(k)->uk_lock, (k)->uk_name, \ 540e20a199fSJeff Roberson (k)->uk_name, MTX_DEF | MTX_DUPOK); \ 54128bc4419SJeff Roberson else \ 542e20a199fSJeff Roberson mtx_init(&(k)->uk_lock, (k)->uk_name, \ 54328bc4419SJeff Roberson "UMA zone", MTX_DEF | MTX_DUPOK); \ 54428bc4419SJeff Roberson } while (0) 54528bc4419SJeff Roberson 546e20a199fSJeff Roberson #define KEG_LOCK_FINI(k) mtx_destroy(&(k)->uk_lock) 547e20a199fSJeff Roberson #define KEG_LOCK(k) mtx_lock(&(k)->uk_lock) 548e20a199fSJeff Roberson #define KEG_UNLOCK(k) mtx_unlock(&(k)->uk_lock) 549bb15d1c7SGleb Smirnoff #define KEG_LOCK_ASSERT(k) mtx_assert(&(k)->uk_lock, MA_OWNED) 550bb15d1c7SGleb Smirnoff 551bb15d1c7SGleb Smirnoff #define KEG_GET(zone, keg) do { \ 552bb15d1c7SGleb Smirnoff (keg) = (zone)->uz_keg; \ 553bb15d1c7SGleb Smirnoff KASSERT((void *)(keg) != (void *)&(zone)->uz_lock, \ 554bb15d1c7SGleb Smirnoff ("%s: Invalid zone %p type", __func__, (zone))); \ 555bb15d1c7SGleb Smirnoff } while (0) 556af526374SJeff Roberson 557af526374SJeff Roberson #define ZONE_LOCK_INIT(z, lc) \ 558af526374SJeff Roberson do { \ 559af526374SJeff Roberson if ((lc)) \ 560af526374SJeff Roberson mtx_init(&(z)->uz_lock, (z)->uz_name, \ 561af526374SJeff Roberson (z)->uz_name, MTX_DEF | MTX_DUPOK); \ 562af526374SJeff Roberson else \ 563af526374SJeff Roberson mtx_init(&(z)->uz_lock, (z)->uz_name, \ 564af526374SJeff Roberson "UMA zone", MTX_DEF | MTX_DUPOK); \ 565af526374SJeff Roberson } while (0) 566af526374SJeff Roberson 567af526374SJeff Roberson #define ZONE_LOCK(z) mtx_lock((z)->uz_lockptr) 568af526374SJeff Roberson #define ZONE_TRYLOCK(z) mtx_trylock((z)->uz_lockptr) 569af526374SJeff Roberson #define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lockptr) 570af526374SJeff Roberson #define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock) 5710f9b7bf3SMark Johnston #define ZONE_LOCK_ASSERT(z) mtx_assert((z)->uz_lockptr, MA_OWNED) 5728355f576SJeff Roberson 5738355f576SJeff Roberson /* 5748355f576SJeff Roberson * Find a slab within a hash table. This is used for OFFPAGE zones to lookup 5758355f576SJeff Roberson * the slab structure. 5768355f576SJeff Roberson * 5778355f576SJeff Roberson * Arguments: 5788355f576SJeff Roberson * hash The hash table to search. 5798355f576SJeff Roberson * data The base page of the item. 5808355f576SJeff Roberson * 5818355f576SJeff Roberson * Returns: 5828355f576SJeff Roberson * A pointer to a slab if successful, else NULL. 5838355f576SJeff Roberson */ 5848355f576SJeff Roberson static __inline uma_slab_t 58585dcf349SGleb Smirnoff hash_sfind(struct uma_hash *hash, uint8_t *data) 5868355f576SJeff Roberson { 5871e0701e1SJeff Roberson uma_hash_slab_t slab; 5886929b7d1SPedro F. Giffuni u_int hval; 5898355f576SJeff Roberson 5908355f576SJeff Roberson hval = UMA_HASH(hash, data); 5918355f576SJeff Roberson 5921e0701e1SJeff Roberson LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) { 5931e0701e1SJeff Roberson if ((uint8_t *)slab->uhs_data == data) 5941e0701e1SJeff Roberson return (&slab->uhs_slab); 5958355f576SJeff Roberson } 5968355f576SJeff Roberson return (NULL); 5978355f576SJeff Roberson } 5988355f576SJeff Roberson 59999571dc3SJeff Roberson static __inline uma_slab_t 60099571dc3SJeff Roberson vtoslab(vm_offset_t va) 60199571dc3SJeff Roberson { 60299571dc3SJeff Roberson vm_page_t p; 60399571dc3SJeff Roberson 60499571dc3SJeff Roberson p = PHYS_TO_VM_PAGE(pmap_kextract(va)); 605584061b4SJeff Roberson return (p->plinks.uma.slab); 60699571dc3SJeff Roberson } 60799571dc3SJeff Roberson 60899571dc3SJeff Roberson static __inline void 609584061b4SJeff Roberson vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab) 61099571dc3SJeff Roberson { 61199571dc3SJeff Roberson vm_page_t p; 61299571dc3SJeff Roberson 6136fc96493SOlivier Houchard p = PHYS_TO_VM_PAGE(pmap_kextract(va)); 614584061b4SJeff Roberson *slab = p->plinks.uma.slab; 615584061b4SJeff Roberson *zone = p->plinks.uma.zone; 616584061b4SJeff Roberson } 617584061b4SJeff Roberson 618584061b4SJeff Roberson static __inline void 619584061b4SJeff Roberson vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab) 620584061b4SJeff Roberson { 621584061b4SJeff Roberson vm_page_t p; 622584061b4SJeff Roberson 623584061b4SJeff Roberson p = PHYS_TO_VM_PAGE(pmap_kextract(va)); 624584061b4SJeff Roberson p->plinks.uma.slab = slab; 625584061b4SJeff Roberson p->plinks.uma.zone = zone; 62699571dc3SJeff Roberson } 62799571dc3SJeff Roberson 6286d6a03d7SJeff Roberson extern unsigned long uma_kmem_limit; 6296d6a03d7SJeff Roberson extern unsigned long uma_kmem_total; 6306d6a03d7SJeff Roberson 6316d6a03d7SJeff Roberson /* Adjust bytes under management by UMA. */ 6326d6a03d7SJeff Roberson static inline void 6336d6a03d7SJeff Roberson uma_total_dec(unsigned long size) 6346d6a03d7SJeff Roberson { 6356d6a03d7SJeff Roberson 6366d6a03d7SJeff Roberson atomic_subtract_long(&uma_kmem_total, size); 6376d6a03d7SJeff Roberson } 6386d6a03d7SJeff Roberson 6396d6a03d7SJeff Roberson static inline void 6406d6a03d7SJeff Roberson uma_total_inc(unsigned long size) 6416d6a03d7SJeff Roberson { 6426d6a03d7SJeff Roberson 6436d6a03d7SJeff Roberson if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit) 6446d6a03d7SJeff Roberson uma_reclaim_wakeup(); 6456d6a03d7SJeff Roberson } 6466d6a03d7SJeff Roberson 64748eea375SJeff Roberson /* 64848eea375SJeff Roberson * The following two functions may be defined by architecture specific code 649763df3ecSPedro F. Giffuni * if they can provide more efficient allocation functions. This is useful 65048eea375SJeff Roberson * for using direct mapped addresses. 65148eea375SJeff Roberson */ 652ab3185d1SJeff Roberson void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, 653ab3185d1SJeff Roberson uint8_t *pflag, int wait); 654f2c2231eSRyan Stone void uma_small_free(void *mem, vm_size_t size, uint8_t flags); 6552e47807cSJeff Roberson 6562e47807cSJeff Roberson /* Set a global soft limit on UMA managed memory. */ 6572e47807cSJeff Roberson void uma_set_limit(unsigned long limit); 658af17e9a9SRobert Watson #endif /* _KERNEL */ 65948eea375SJeff Roberson 6608355f576SJeff Roberson #endif /* VM_UMA_INT_H */ 661