/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.
 * For item sizes that would yield more than 10% memory waste we potentially
 * allocate a separate uma_slab_t if this will improve the number of items
 * per slab that will fit.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and setup for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
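/*
 * Illustrative sketch, not part of UMA and not compiled: how a consumer
 * drives the objects described above through the public API in uma.h.
 * The "foo" zone, its item type, and the callback bodies are hypothetical.
 * uma_zcreate() allocates and sets up the backing Keg; uma_zalloc() and
 * uma_zfree() normally complete in the per-CPU caches and Bucket layers
 * without ever touching the Keg's slabs.
 */
#if 0
struct foo {
	int	f_refs;
};

static uma_zone_t foo_zone;

static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
	struct foo *f = mem;

	f->f_refs = 1;		/* Applied on every allocation. */
	return (0);
}

static void
foo_dtor(void *mem, int size, void *arg)
{
	/* Applied on every free, before the item re-enters the caches. */
}

static void
foo_example(void)
{
	struct foo *f;

	foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor,
	    foo_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
	f = uma_zalloc(foo_zone, M_WAITOK);
	uma_zfree(foo_zone, f);
}
#endif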
/*
 * This is the representation for a normal (non-OFFPAGE) slab:
 *
 * i == item
 * s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 * ___________    ^
 * |slab header|   |
 * |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES	64		/* Pages allocated for startup */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
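/*
 * Illustrative sketch, not compiled: the decision UMA_MAX_WASTE feeds, in
 * the spirit of the keg setup code in uma_core.c.  If keeping the slab
 * header inside the page wastes at least 1/UMA_MAX_WASTE of the slab, and
 * pushing the header off page would let more items fit, slab management
 * goes OFFPAGE.  All names below are local to the sketch.
 */
#if 0
static int
would_go_offpage(int rsize)
{
	int ipers, wasted;

	/* Items and leftover space with an embedded slab header. */
	ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
	wasted = UMA_SLAB_SIZE - sizeof(struct uma_slab) - ipers * rsize;

	/* Waste over the threshold and an off-page header gains items? */
	return (wasted >= UMA_SLAB_SIZE / UMA_MAX_WASTE &&
	    ipers < UMA_SLAB_SIZE / rsize);
}
#endif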
/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),			\
	    (mem))], (s), uma_slab, us_hlink)

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};
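/*
 * Illustrative sketch, not compiled: how UMA_HASH() drives a lookup.  The
 * hash input is just the page number of the slab's data address, so
 * contiguous slab pages fall into consecutive buckets and collide only
 * once the table wraps around.  This is essentially hash_sfind(), defined
 * later in this file.
 */
#if 0
static uma_slab_t
example_lookup(struct uma_hash *hash, uint8_t *pgaddr)
{
	uma_slab_t slab;

	/* Walk only the single bucket selected by the page number. */
	SLIST_FOREACH(slab, &hash->uh_slab_hash[UMA_HASH(hash, pgaddr)],
	    us_hlink) {
		if (slab->us_data == pgaddr)
			return (slab);
	}
	return (NULL);
}
#endif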
/*
 * align field or structure to cache line
 */
#if defined(__amd64__)
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
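/*
 * Illustrative sketch, simplified and not compiled: the shape of the
 * allocation fast path over the per-CPU structures above, after the style
 * of uma_zalloc_arg() in uma_core.c.  No lock is taken; the critical
 * section pins the thread to one CPU, and each CPU owns its uma_cache
 * exclusively.  Bucket exchange, refill, and the slow path are omitted.
 */
#if 0
static void *
example_cache_alloc(uma_zone_t zone)
{
	uma_bucket_t bucket;
	uma_cache_t cache;
	void *item;

	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		item = bucket->ub_bucket[--bucket->ub_cnt];
		cache->uc_allocs++;
		critical_exit();
		return (item);		/* uz_ctor, if any, runs after. */
	}
	critical_exit();
	return (NULL);			/* Real code refills the bucket. */
}
#endif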
/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx_padalign	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */
	uint32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint16_t	uk_slabsize;	/* Slab size for this keg */
	uint16_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_pad;			/* Pad to 32bits, unused. */
};

#define us_link	us_type._us_link
#define us_size	us_type._us_size

/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items have
 * reference counters maintained in the slab.
 */
struct uma_slab_refcnt {
	struct uma_slab		us_head;	/* slab header data */
	uint32_t		us_refcnt[0];	/* Actually larger. */
};

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;
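/*
 * Illustrative sketch, simplified and not compiled: carving one item out
 * of a slab with the us_free bitset, in the spirit of slab_alloc_item()
 * in uma_core.c.  A set bit marks a free item; BIT_FFS() is 1-based.
 */
#if 0
static void *
example_slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
{
	int freei;

	/* Find the first free item and claim its bit. */
	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
	slab->us_freecount--;
	keg->uk_free--;

	/* Items are laid out at uk_rsize intervals from us_data. */
	return (slab->us_data + keg->uk_rsize * freei);
}
#endif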
/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	struct mtx_padalign	uz_lock;	/* Lock for the zone */
	struct mtx_padalign	*uz_lockptr;
	const char	*uz_name;	/* Text name of the zone */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_buckets;	/* full buckets */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */

	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */

	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
	volatile u_long	uz_fails;	/* Total number of alloc failures */
	volatile u_long	uz_frees;	/* Total number of frees */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint16_t	uz_count;	/* Amount of items in full bucket */
	uint16_t	uz_count_min;	/* Minimal amount of items there */

	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */

	struct task	uz_maxaction;	/* Task to run when at limit */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)
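/*
 * Illustrative sketch, not compiled: UMA_ZFLAG_FULL is set once a capped
 * zone runs out, and uz_warning/uz_ratecheck then rate-limit the message
 * configured by the consumer.  uma_zone_set_max() and
 * uma_zone_set_warning() are the public uma.h interfaces behind these
 * fields; the zone and message below are hypothetical.
 */
#if 0
static void
foo_limit_setup(void)
{
	uma_zone_set_max(foo_zone, 1024);	/* Cap the item count. */
	uma_zone_set_warning(foo_zone, "out of foo objects");
}
#endif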
static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
	uma_klink_t klink;

	klink = LIST_FIRST(&zone->uz_kegs);
	return ((klink != NULL) ? klink->kl_keg : NULL);
}

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)

#define ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
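/*
 * Illustrative sketch, simplified and not compiled: the keg lock covers
 * the three slab lists and the free counts, so moving a slab between
 * lists looks like this.  Zones lock through uz_lockptr, which points at
 * the backing keg's uk_lock when the zone and keg share a lock.
 */
#if 0
static void
example_slab_move(uma_keg_t keg, uma_slab_t slab)
{
	KEG_LOCK(keg);
	LIST_REMOVE(slab, us_link);	/* E.g., off uk_free_slab. */
	LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
	KEG_UNLOCK(keg);
}
#endif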
/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
    int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */