/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
Each slab is 51ab3185d1SJeff Roberson * broken down int one or more items that will satisfy an individual allocation. 52ab3185d1SJeff Roberson * 53ab3185d1SJeff Roberson * Allocation is satisfied in the following order: 54ab3185d1SJeff Roberson * 1) Per-CPU cache 55ab3185d1SJeff Roberson * 2) Per-domain cache of buckets 56ab3185d1SJeff Roberson * 3) Slab from any of N kegs 57ab3185d1SJeff Roberson * 4) Backend page provider 58ab3185d1SJeff Roberson * 59ab3185d1SJeff Roberson * More detail on individual objects is contained below: 608355f576SJeff Roberson * 61099a0e58SBosko Milekic * Kegs contain lists of slabs which are stored in either the full bin, empty 628355f576SJeff Roberson * bin, or partially allocated bin, to reduce fragmentation. They also contain 638355f576SJeff Roberson * the user supplied value for size, which is adjusted for alignment purposes 64099a0e58SBosko Milekic * and rsize is the result of that. The Keg also stores information for 658355f576SJeff Roberson * managing a hash of page addresses that maps pages to uma_slab_t structures 668355f576SJeff Roberson * for pages that don't have embedded uma_slab_t's. 678355f576SJeff Roberson * 68ab3185d1SJeff Roberson * Keg slab lists are organized by memory domain to support NUMA allocation 69ab3185d1SJeff Roberson * policies. By default allocations are spread across domains to reduce the 70ab3185d1SJeff Roberson * potential for hotspots. Special keg creation flags may be specified to 71ab3185d1SJeff Roberson * prefer location allocation. However there is no strict enforcement as frees 72ab3185d1SJeff Roberson * may happen on any CPU and these are returned to the CPU-local cache 73ab3185d1SJeff Roberson * regardless of the originating domain. 74ab3185d1SJeff Roberson * 758355f576SJeff Roberson * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may 768355f576SJeff Roberson * be allocated off the page from a special slab zone. The free list within a 77ef72505eSJeff Roberson * slab is managed with a bitmask. For item sizes that would yield more than 78ef72505eSJeff Roberson * 10% memory waste we potentially allocate a separate uma_slab_t if this will 79ef72505eSJeff Roberson * improve the number of items per slab that will fit. 808355f576SJeff Roberson * 818355f576SJeff Roberson * The only really gross cases, with regards to memory waste, are for those 828355f576SJeff Roberson * items that are just over half the page size. You can get nearly 50% waste, 838355f576SJeff Roberson * so you fall back to the memory footprint of the power of two allocator. I 848355f576SJeff Roberson * have looked at memory allocation sizes on many of the machines available to 858355f576SJeff Roberson * me, and there does not seem to be an abundance of allocations at this range 868355f576SJeff Roberson * so at this time it may not make sense to optimize for it. This can, of 878355f576SJeff Roberson * course, be solved with dynamic slab sizes. 888355f576SJeff Roberson * 89099a0e58SBosko Milekic * Kegs may serve multiple Zones but by far most of the time they only serve 90099a0e58SBosko Milekic * one. When a Zone is created, a Keg is allocated and setup for it. While 91099a0e58SBosko Milekic * the backing Keg stores slabs, the Zone caches Buckets of items allocated 92099a0e58SBosko Milekic * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor 93099a0e58SBosko Milekic * pair, as well as with its own set of small per-CPU caches, layered above 94099a0e58SBosko Milekic * the Zone's general Bucket cache. 

/*
 * This is the representation for normal (non-OFFPAGE) slabs:
 *
 * i == item
 * s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE)  ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 *  ___________    ^
 * |slab header|   |
 * |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10

/*
 * Actual size of uma_slab when it is placed at the end of a page
 * with pointer sized alignment requirement.
 */
#define	SIZEOF_UMA_SLAB	((sizeof(struct uma_slab) & UMA_ALIGN_PTR) ?	\
			    (sizeof(struct uma_slab) & ~UMA_ALIGN_PTR) +	\
			    (UMA_ALIGN_PTR + 1) : sizeof(struct uma_slab))

/*
 * Size of memory in a non-offpage, single-page slab that is available for
 * actual items.
 */
#define	UMA_SLAB_SPACE	(PAGE_SIZE - SIZEOF_UMA_SLAB)
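
/*
 * A worked example of the two macros above, with hypothetical numbers:
 * with 8-byte pointers UMA_ALIGN_PTR is 7, so a 90-byte struct uma_slab has
 * (90 & 7) != 0 and is rounded up to (90 & ~7) + 8 == 96 bytes.  On a
 * 4096-byte page that leaves UMA_SLAB_SPACE == 4096 - 96 == 4000 bytes for
 * items.  A sizeof() that is already a multiple of the pointer size passes
 * through unchanged.
 */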

/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),			\
	    (mem))], (s), uma_slab, us_hlink)

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};
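
/*
 * Example (illustrative addresses): with 4 KB pages (UMA_SLAB_SHIFT == 12)
 * and a 32-bucket table (uh_hashmask == 31), a slab whose data page is
 * 0xffff800012345000 hashes to
 *
 *	(0xffff800012345000 >> 12) & 31 == 0x45 & 31 == 5
 *
 * and consecutive pages land in consecutive buckets, which is why collisions
 * stay low when slab pages are relatively contiguous.
 */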

/*
 * Align field or structure to cache line.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_ALIGN	__aligned(128)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of items in bucket. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

/*
 * Per-domain memory list.  Embedded in the kegs.
 */
struct uma_domain {
	LIST_HEAD(,uma_slab)	ud_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	ud_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	ud_full_slab;	/* full slabs */
};

typedef struct uma_domain * uma_domain_t;
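
/*
 * Sketch of how a slab migrates between the per-domain lists as items are
 * handed out (a simplified rendering of the transitions in uma_core.c):
 *
 *	LIST_REMOVE(slab, us_link);
 *	if (slab->us_freecount == 0)
 *		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
 *	else
 *		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
 *
 * Frees walk it back the other way, toward ud_free_slab, so reclamation can
 * release entirely empty slabs to the backend.
 */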

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx	uk_lock;	/* Lock for the keg must be first.
					 * See shared uz_keg/uz_lockptr
					 * member of struct uma_zone. */
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref	uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg * uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags, see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
};

#define	us_link	us_type._us_link
#define	us_size	us_type._us_size

#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;

struct uma_zone_domain {
	LIST_HEAD(,uma_bucket)	uzd_buckets;	/* full buckets */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_wss;	/* working set size estimate */
};

typedef struct uma_zone_domain * uma_zone_domain_t;
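
/*
 * Sketch of the working-set estimate (the real update runs from the periodic
 * UMA timeout in uma_core.c): each period the spread between the watermarks
 * approximates how many cached items were actually in use, and buckets
 * beyond the estimate become candidates for trimming:
 *
 *	zdom->uzd_wss = zdom->uzd_imax - zdom->uzd_imin;
 *	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
 */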

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	union {
		uma_keg_t	uz_keg;		/* This zone's keg */
		struct mtx	*uz_lockptr;	/* To keg or to self */
	};
	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uint64_t	uz_items;	/* Total items count */
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint32_t	uz_sleepers;	/* Number of sleepers on memory */
	uint16_t	uz_count;	/* Amount of items in full bucket */
	uint16_t	uz_count_max;	/* Maximum amount of items there */

	/* Offset 64, used in bucket replenish. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	void		*uz_spare;
	uint64_t	uz_bkt_count;	/* Items in bucket cache */
	uint64_t	uz_bkt_max;	/* Maximum bucket cache size */

	/* Offset 128 Rare. */
	/*
	 * The lock is placed here to avoid adjacent line prefetcher
	 * in fast paths and to take up space near infrequently accessed
	 * members to reduce alignment overhead.
	 */
	struct mtx	uz_lock;	/* Lock for the zone */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	const char	*uz_name;	/* Text name of the zone */
	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */
	uint16_t	uz_count_min;	/* Minimal amount of items in bucket */

	/* Offset 256, stats. */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[];	/* Per cpu caches */

	/* uz_domain follows here. */
};
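
/*
 * Sizing sketch: because uz_cpu[] (and the uz_domain array behind it) are
 * variable sized, a zone is carved from a single allocation of roughly
 *
 *	sizeof(struct uma_zone) +
 *	    (mp_maxid + 1) * sizeof(struct uma_cache) +
 *	    vm_ndomains * sizeof(struct uma_zone_domain)
 *
 * bytes (see the startup code in uma_core.c for the authoritative
 * computation), which is why uz_cpu must remain the last member proper.
 */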

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define	UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void *uma_large_malloc_domain(vm_size_t size, int domain, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)
#define	KEG_LOCK_ASSERT(k)	mtx_assert(&(k)->uk_lock, MA_OWNED)

#define	KEG_GET(zone, keg) do {					\
	(keg) = (zone)->uz_keg;					\
	KASSERT((void *)(keg) != (void *)&(zone)->uz_lock,	\
	    ("%s: Invalid zone %p type", __func__, (zone)));	\
	} while (0)

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define	ZONE_LOCK_ASSERT(z)	mtx_assert((z)->uz_lockptr, MA_OWNED)
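
/*
 * Usage sketch: ZONE_LOCK() follows uz_lockptr, so for a keg-backed zone it
 * acquires the shared keg lock, while a cache zone locks its own uz_lock;
 * KEG_GET() then resolves the backing keg when the slab layer is needed:
 *
 *	ZONE_LOCK(zone);
 *	KEG_GET(zone, keg);
 *	KEG_LOCK_ASSERT(keg);
 *	... manipulate keg slab lists ...
 *	ZONE_UNLOCK(zone);
 */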

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */