160727d8bSWarner Losh /*- 2ef72505eSJeff Roberson * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org> 308ecce74SRobert Watson * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org> 408ecce74SRobert Watson * All rights reserved. 58355f576SJeff Roberson * 68355f576SJeff Roberson * Redistribution and use in source and binary forms, with or without 78355f576SJeff Roberson * modification, are permitted provided that the following conditions 88355f576SJeff Roberson * are met: 98355f576SJeff Roberson * 1. Redistributions of source code must retain the above copyright 108355f576SJeff Roberson * notice unmodified, this list of conditions, and the following 118355f576SJeff Roberson * disclaimer. 128355f576SJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright 138355f576SJeff Roberson * notice, this list of conditions and the following disclaimer in the 148355f576SJeff Roberson * documentation and/or other materials provided with the distribution. 158355f576SJeff Roberson * 168355f576SJeff Roberson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 178355f576SJeff Roberson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 188355f576SJeff Roberson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 198355f576SJeff Roberson * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 208355f576SJeff Roberson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 218355f576SJeff Roberson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 228355f576SJeff Roberson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 238355f576SJeff Roberson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 248355f576SJeff Roberson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 258355f576SJeff Roberson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 268355f576SJeff Roberson * 278355f576SJeff Roberson * $FreeBSD$ 288355f576SJeff Roberson * 298355f576SJeff Roberson */ 308355f576SJeff Roberson 318355f576SJeff Roberson /* 328355f576SJeff Roberson * This file includes definitions, structures, prototypes, and inlines that 338355f576SJeff Roberson * should not be used outside of the actual implementation of UMA. 348355f576SJeff Roberson */ 358355f576SJeff Roberson 368355f576SJeff Roberson /* 378355f576SJeff Roberson * Here's a quick description of the relationship between the objects: 388355f576SJeff Roberson * 39099a0e58SBosko Milekic * Kegs contain lists of slabs which are stored in either the full bin, empty 408355f576SJeff Roberson * bin, or partially allocated bin, to reduce fragmentation. They also contain 418355f576SJeff Roberson * the user supplied value for size, which is adjusted for alignment purposes 42099a0e58SBosko Milekic * and rsize is the result of that. The Keg also stores information for 438355f576SJeff Roberson * managing a hash of page addresses that maps pages to uma_slab_t structures 448355f576SJeff Roberson * for pages that don't have embedded uma_slab_t's. 458355f576SJeff Roberson * 468355f576SJeff Roberson * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may 478355f576SJeff Roberson * be allocated off the page from a special slab zone. The free list within a 48ef72505eSJeff Roberson * slab is managed with a bitmask. For item sizes that would yield more than 49ef72505eSJeff Roberson * 10% memory waste we potentially allocate a separate uma_slab_t if this will 50ef72505eSJeff Roberson * improve the number of items per slab that will fit. 518355f576SJeff Roberson * 528355f576SJeff Roberson * Other potential space optimizations are storing the 8bit of linkage in space 538355f576SJeff Roberson * wasted between items due to alignment problems. This may yield a much better 548355f576SJeff Roberson * memory footprint for certain sizes of objects. Another alternative is to 558355f576SJeff Roberson * increase the UMA_SLAB_SIZE, or allow for dynamic slab sizes. I prefer 5659d7277fSAlan Cox * dynamic slab sizes because we could stick with 8 bit indices and only use 578355f576SJeff Roberson * large slab sizes for zones with a lot of waste per slab. This may create 5859d7277fSAlan Cox * inefficiencies in the vm subsystem due to fragmentation in the address space. 598355f576SJeff Roberson * 608355f576SJeff Roberson * The only really gross cases, with regards to memory waste, are for those 618355f576SJeff Roberson * items that are just over half the page size. You can get nearly 50% waste, 628355f576SJeff Roberson * so you fall back to the memory footprint of the power of two allocator. I 638355f576SJeff Roberson * have looked at memory allocation sizes on many of the machines available to 648355f576SJeff Roberson * me, and there does not seem to be an abundance of allocations at this range 658355f576SJeff Roberson * so at this time it may not make sense to optimize for it. This can, of 668355f576SJeff Roberson * course, be solved with dynamic slab sizes. 678355f576SJeff Roberson * 68099a0e58SBosko Milekic * Kegs may serve multiple Zones but by far most of the time they only serve 69099a0e58SBosko Milekic * one. When a Zone is created, a Keg is allocated and setup for it. While 70099a0e58SBosko Milekic * the backing Keg stores slabs, the Zone caches Buckets of items allocated 71099a0e58SBosko Milekic * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor 72099a0e58SBosko Milekic * pair, as well as with its own set of small per-CPU caches, layered above 73099a0e58SBosko Milekic * the Zone's general Bucket cache. 74099a0e58SBosko Milekic * 756ab3b958SRobert Watson * The PCPU caches are protected by critical sections, and may be accessed 766ab3b958SRobert Watson * safely only from their associated CPU, while the Zones backed by the same 776ab3b958SRobert Watson * Keg all share a common Keg lock (to coalesce contention on the backing 786ab3b958SRobert Watson * slabs). The backing Keg typically only serves one Zone but in the case of 796ab3b958SRobert Watson * multiple Zones, one of the Zones is considered the Master Zone and all 806ab3b958SRobert Watson * Zone-related stats from the Keg are done in the Master Zone. For an 816ab3b958SRobert Watson * example of a Multi-Zone setup, refer to the Mbuf allocation code. 828355f576SJeff Roberson */ 838355f576SJeff Roberson 848355f576SJeff Roberson /* 858355f576SJeff Roberson * This is the representation for normal (Non OFFPAGE slab) 868355f576SJeff Roberson * 878355f576SJeff Roberson * i == item 888355f576SJeff Roberson * s == slab pointer 898355f576SJeff Roberson * 908355f576SJeff Roberson * <---------------- Page (UMA_SLAB_SIZE) ------------------> 918355f576SJeff Roberson * ___________________________________________________________ 928355f576SJeff Roberson * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ | 938355f576SJeff Roberson * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header|| 948355f576SJeff Roberson * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________|| 958355f576SJeff Roberson * |___________________________________________________________| 968355f576SJeff Roberson * 978355f576SJeff Roberson * 988355f576SJeff Roberson * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE. 998355f576SJeff Roberson * 1008355f576SJeff Roberson * ___________________________________________________________ 1018355f576SJeff Roberson * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ | 1028355f576SJeff Roberson * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| | 1038355f576SJeff Roberson * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| | 1048355f576SJeff Roberson * |___________________________________________________________| 1058355f576SJeff Roberson * ___________ ^ 1068355f576SJeff Roberson * |slab header| | 1078355f576SJeff Roberson * |___________|---* 1088355f576SJeff Roberson * 1098355f576SJeff Roberson */ 1108355f576SJeff Roberson 1118355f576SJeff Roberson #ifndef VM_UMA_INT_H 1128355f576SJeff Roberson #define VM_UMA_INT_H 1138355f576SJeff Roberson 1148355f576SJeff Roberson #define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */ 1158355f576SJeff Roberson #define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */ 1168355f576SJeff Roberson #define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits PAGE_MASK */ 1178355f576SJeff Roberson 118342f1793SAlan Cox #define UMA_BOOT_PAGES 64 /* Pages allocated for startup */ 1198355f576SJeff Roberson 120ad97af7eSGleb Smirnoff /* Max waste percentage before going to off page slab management */ 121ad97af7eSGleb Smirnoff #define UMA_MAX_WASTE 10 1228355f576SJeff Roberson 1238355f576SJeff Roberson /* 1248355f576SJeff Roberson * I doubt there will be many cases where this is exceeded. This is the initial 1258355f576SJeff Roberson * size of the hash table for uma_slabs that are managed off page. This hash 1268355f576SJeff Roberson * does expand by powers of two. Currently it doesn't get smaller. 1278355f576SJeff Roberson */ 1288355f576SJeff Roberson #define UMA_HASH_SIZE_INIT 32 1298355f576SJeff Roberson 1308355f576SJeff Roberson /* 1318355f576SJeff Roberson * I should investigate other hashing algorithms. This should yield a low 1328355f576SJeff Roberson * number of collisions if the pages are relatively contiguous. 1338355f576SJeff Roberson */ 1348355f576SJeff Roberson 135ef72505eSJeff Roberson #define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask) 1368355f576SJeff Roberson 1378355f576SJeff Roberson #define UMA_HASH_INSERT(h, s, mem) \ 1388355f576SJeff Roberson SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \ 1394e2d83fcSAntoine Brodin (mem))], (s), us_hlink) 1408355f576SJeff Roberson #define UMA_HASH_REMOVE(h, s, mem) \ 1418355f576SJeff Roberson SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \ 1424e2d83fcSAntoine Brodin (mem))], (s), uma_slab, us_hlink) 1438355f576SJeff Roberson 1448355f576SJeff Roberson /* Hash table for freed address -> slab translation */ 1458355f576SJeff Roberson 1468355f576SJeff Roberson SLIST_HEAD(slabhead, uma_slab); 1478355f576SJeff Roberson 1488355f576SJeff Roberson struct uma_hash { 1498355f576SJeff Roberson struct slabhead *uh_slab_hash; /* Hash table for slabs */ 1508355f576SJeff Roberson int uh_hashsize; /* Current size of the hash table */ 1518355f576SJeff Roberson int uh_hashmask; /* Mask used during hashing */ 1528355f576SJeff Roberson }; 1538355f576SJeff Roberson 1548355f576SJeff Roberson /* 1555e4bb93cSKip Macy * align field or structure to cache line 1565e4bb93cSKip Macy */ 1571a23373cSKip Macy #if defined(__amd64__) 1581a23373cSKip Macy #define UMA_ALIGN __aligned(CACHE_LINE_SIZE) 1591a23373cSKip Macy #else 1606b4391d7SKip Macy #define UMA_ALIGN 1611a23373cSKip Macy #endif 1625e4bb93cSKip Macy 1635e4bb93cSKip Macy /* 1648355f576SJeff Roberson * Structures for per cpu queues. 1658355f576SJeff Roberson */ 1668355f576SJeff Roberson 1678355f576SJeff Roberson struct uma_bucket { 1688355f576SJeff Roberson LIST_ENTRY(uma_bucket) ub_link; /* Link into the zone */ 169cae33c14SJeff Roberson int16_t ub_cnt; /* Count of free items. */ 170cae33c14SJeff Roberson int16_t ub_entries; /* Max items. */ 171cae33c14SJeff Roberson void *ub_bucket[]; /* actual allocation storage */ 1721a23373cSKip Macy }; 1738355f576SJeff Roberson 1748355f576SJeff Roberson typedef struct uma_bucket * uma_bucket_t; 1758355f576SJeff Roberson 1768355f576SJeff Roberson struct uma_cache { 1778355f576SJeff Roberson uma_bucket_t uc_freebucket; /* Bucket we're freeing to */ 1788355f576SJeff Roberson uma_bucket_t uc_allocbucket; /* Bucket to allocate from */ 17985dcf349SGleb Smirnoff uint64_t uc_allocs; /* Count of allocations */ 18085dcf349SGleb Smirnoff uint64_t uc_frees; /* Count of frees */ 1815e4bb93cSKip Macy } UMA_ALIGN; 1828355f576SJeff Roberson 1838355f576SJeff Roberson typedef struct uma_cache * uma_cache_t; 1848355f576SJeff Roberson 1858355f576SJeff Roberson /* 186099a0e58SBosko Milekic * Keg management structure 187099a0e58SBosko Milekic * 188099a0e58SBosko Milekic * TODO: Optimize for cache line size 189099a0e58SBosko Milekic * 190099a0e58SBosko Milekic */ 191099a0e58SBosko Milekic struct uma_keg { 192af526374SJeff Roberson struct mtx_padalign uk_lock; /* Lock for the keg */ 193099a0e58SBosko Milekic struct uma_hash uk_hash; 194099a0e58SBosko Milekic 195099a0e58SBosko Milekic LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */ 196099a0e58SBosko Milekic LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */ 197099a0e58SBosko Milekic LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */ 198099a0e58SBosko Milekic LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */ 199099a0e58SBosko Milekic 20085dcf349SGleb Smirnoff uint32_t uk_align; /* Alignment mask */ 20185dcf349SGleb Smirnoff uint32_t uk_pages; /* Total page count */ 20285dcf349SGleb Smirnoff uint32_t uk_free; /* Count of items free in slabs */ 203*6fd34d6fSJeff Roberson uint32_t uk_reserve; /* Number of reserved items. */ 20485dcf349SGleb Smirnoff uint32_t uk_size; /* Requested size of each item */ 20585dcf349SGleb Smirnoff uint32_t uk_rsize; /* Real size of each item */ 20685dcf349SGleb Smirnoff uint32_t uk_maxpages; /* Maximum number of pages to alloc */ 207099a0e58SBosko Milekic 208099a0e58SBosko Milekic uma_init uk_init; /* Keg's init routine */ 209099a0e58SBosko Milekic uma_fini uk_fini; /* Keg's fini routine */ 210099a0e58SBosko Milekic uma_alloc uk_allocf; /* Allocation function */ 211099a0e58SBosko Milekic uma_free uk_freef; /* Free routine */ 212099a0e58SBosko Milekic 213a4915c21SAttilio Rao u_long uk_offset; /* Next free offset from base KVA */ 214a4915c21SAttilio Rao vm_offset_t uk_kva; /* Zone base KVA */ 215099a0e58SBosko Milekic uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */ 216099a0e58SBosko Milekic 21785dcf349SGleb Smirnoff uint16_t uk_slabsize; /* Slab size for this keg */ 21885dcf349SGleb Smirnoff uint16_t uk_pgoff; /* Offset to uma_slab struct */ 21985dcf349SGleb Smirnoff uint16_t uk_ppera; /* pages per allocation from backend */ 22085dcf349SGleb Smirnoff uint16_t uk_ipers; /* Items per slab */ 22185dcf349SGleb Smirnoff uint32_t uk_flags; /* Internal flags */ 222ad97af7eSGleb Smirnoff 223ad97af7eSGleb Smirnoff /* Least used fields go to the last cache line. */ 224ad97af7eSGleb Smirnoff const char *uk_name; /* Name of creating zone. */ 225ad97af7eSGleb Smirnoff LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */ 226099a0e58SBosko Milekic }; 227099a0e58SBosko Milekic typedef struct uma_keg * uma_keg_t; 228099a0e58SBosko Milekic 229ef72505eSJeff Roberson /* 230ef72505eSJeff Roberson * Free bits per-slab. 231ef72505eSJeff Roberson */ 232ef72505eSJeff Roberson #define SLAB_SETSIZE (PAGE_SIZE / UMA_SMALLEST_UNIT) 233ef72505eSJeff Roberson BITSET_DEFINE(slabbits, SLAB_SETSIZE); 234099a0e58SBosko Milekic 235ef72505eSJeff Roberson /* 236ef72505eSJeff Roberson * The slab structure manages a single contiguous allocation from backing 237ef72505eSJeff Roberson * store and subdivides it into individually allocatable items. 238ef72505eSJeff Roberson */ 239ef72505eSJeff Roberson struct uma_slab { 240099a0e58SBosko Milekic uma_keg_t us_keg; /* Keg we live in */ 241099a0e58SBosko Milekic union { 242099a0e58SBosko Milekic LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */ 243099a0e58SBosko Milekic unsigned long _us_size; /* Size of allocation */ 244099a0e58SBosko Milekic } us_type; 245099a0e58SBosko Milekic SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */ 24685dcf349SGleb Smirnoff uint8_t *us_data; /* First item */ 247ef72505eSJeff Roberson struct slabbits us_free; /* Free bitmask. */ 248ef72505eSJeff Roberson #ifdef INVARIANTS 249ef72505eSJeff Roberson struct slabbits us_debugfree; /* Debug bitmask. */ 250ef72505eSJeff Roberson #endif 25185dcf349SGleb Smirnoff uint16_t us_freecount; /* How many are free? */ 25285dcf349SGleb Smirnoff uint8_t us_flags; /* Page flags see uma.h */ 253ef72505eSJeff Roberson uint8_t us_pad; /* Pad to 32bits, unused. */ 254099a0e58SBosko Milekic }; 255099a0e58SBosko Milekic 256ef72505eSJeff Roberson #define us_link us_type._us_link 257ef72505eSJeff Roberson #define us_size us_type._us_size 258099a0e58SBosko Milekic 259099a0e58SBosko Milekic /* 260099a0e58SBosko Milekic * The slab structure for UMA_ZONE_REFCNT zones for whose items we 261099a0e58SBosko Milekic * maintain reference counters in the slab for. 262099a0e58SBosko Milekic */ 263099a0e58SBosko Milekic struct uma_slab_refcnt { 264ef72505eSJeff Roberson struct uma_slab us_head; /* slab header data */ 265ef72505eSJeff Roberson uint32_t us_refcnt[0]; /* Actually larger. */ 266099a0e58SBosko Milekic }; 267099a0e58SBosko Milekic 268099a0e58SBosko Milekic typedef struct uma_slab * uma_slab_t; 269099a0e58SBosko Milekic typedef struct uma_slab_refcnt * uma_slabrefcnt_t; 270e20a199fSJeff Roberson typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int); 271e20a199fSJeff Roberson 272e20a199fSJeff Roberson struct uma_klink { 273e20a199fSJeff Roberson LIST_ENTRY(uma_klink) kl_link; 274e20a199fSJeff Roberson uma_keg_t kl_keg; 275e20a199fSJeff Roberson }; 276e20a199fSJeff Roberson typedef struct uma_klink *uma_klink_t; 277e20a199fSJeff Roberson 278244f4554SBosko Milekic /* 2798355f576SJeff Roberson * Zone management structure 2808355f576SJeff Roberson * 2818355f576SJeff Roberson * TODO: Optimize for cache line size 2828355f576SJeff Roberson * 2838355f576SJeff Roberson */ 2848355f576SJeff Roberson struct uma_zone { 285af526374SJeff Roberson struct mtx_padalign uz_lock; /* Lock for the zone */ 286af526374SJeff Roberson struct mtx_padalign *uz_lockptr; 287bb196eb4SMatthew D Fleming const char *uz_name; /* Text name of the zone */ 2888355f576SJeff Roberson 289099a0e58SBosko Milekic LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */ 290fc03d22bSJeff Roberson LIST_HEAD(,uma_bucket) uz_buckets; /* full buckets */ 2918355f576SJeff Roberson 292e20a199fSJeff Roberson LIST_HEAD(,uma_klink) uz_kegs; /* List of kegs. */ 293e20a199fSJeff Roberson struct uma_klink uz_klink; /* klink for first keg. */ 294e20a199fSJeff Roberson 295e20a199fSJeff Roberson uma_slaballoc uz_slab; /* Allocate a slab from the backend. */ 2968355f576SJeff Roberson uma_ctor uz_ctor; /* Constructor for each allocation */ 2978355f576SJeff Roberson uma_dtor uz_dtor; /* Destructor */ 2988355f576SJeff Roberson uma_init uz_init; /* Initializer for each item */ 2990095a784SJeff Roberson uma_fini uz_fini; /* Finalizer for each item. */ 3000095a784SJeff Roberson uma_import uz_import; /* Import new memory to cache. */ 3010095a784SJeff Roberson uma_release uz_release; /* Release memory from cache. */ 3020095a784SJeff Roberson void *uz_arg; /* Import/release argument. */ 303099a0e58SBosko Milekic 30485dcf349SGleb Smirnoff uint32_t uz_flags; /* Flags inherited from kegs */ 30585dcf349SGleb Smirnoff uint32_t uz_size; /* Size inherited from kegs */ 3065e4bb93cSKip Macy 3070095a784SJeff Roberson volatile u_long uz_allocs UMA_ALIGN; /* Total number of allocations */ 3080095a784SJeff Roberson volatile u_long uz_fails; /* Total number of alloc failures */ 3090095a784SJeff Roberson volatile u_long uz_frees; /* Total number of frees */ 31085dcf349SGleb Smirnoff uint64_t uz_sleeps; /* Total number of alloc sleeps */ 311936c747bSGleb Smirnoff uint16_t uz_count; /* Highest amount of items in bucket */ 312099a0e58SBosko Milekic 3132f891cd5SPawel Jakub Dawidek /* The next three fields are used to print a rate-limited warnings. */ 3142f891cd5SPawel Jakub Dawidek const char *uz_warning; /* Warning to print on failure */ 3152f891cd5SPawel Jakub Dawidek struct timeval uz_ratecheck; /* Warnings rate-limiting */ 3162f891cd5SPawel Jakub Dawidek 3178355f576SJeff Roberson /* 3188355f576SJeff Roberson * This HAS to be the last item because we adjust the zone size 3198355f576SJeff Roberson * based on NCPU and then allocate the space for the zones. 3208355f576SJeff Roberson */ 3211a23373cSKip Macy struct uma_cache uz_cpu[1]; /* Per cpu caches */ 3228355f576SJeff Roberson }; 3238355f576SJeff Roberson 324b60f5b79SJeff Roberson /* 325b60f5b79SJeff Roberson * These flags must not overlap with the UMA_ZONE flags specified in uma.h. 326b60f5b79SJeff Roberson */ 327e20a199fSJeff Roberson #define UMA_ZFLAG_MULTI 0x04000000 /* Multiple kegs in the zone. */ 328e20a199fSJeff Roberson #define UMA_ZFLAG_DRAINING 0x08000000 /* Running zone_drain. */ 329*6fd34d6fSJeff Roberson #define UMA_ZFLAG_BUCKET 0x10000000 /* Bucket zone. */ 3302018f30cSMike Silbersack #define UMA_ZFLAG_INTERNAL 0x20000000 /* No offpage no PCPU. */ 3312018f30cSMike Silbersack #define UMA_ZFLAG_FULL 0x40000000 /* Reached uz_maxpages */ 3322018f30cSMike Silbersack #define UMA_ZFLAG_CACHEONLY 0x80000000 /* Don't ask VM for buckets. */ 3338355f576SJeff Roberson 334*6fd34d6fSJeff Roberson #define UMA_ZFLAG_INHERIT \ 335*6fd34d6fSJeff Roberson (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET) 336e20a199fSJeff Roberson 3370095a784SJeff Roberson static inline uma_keg_t 3380095a784SJeff Roberson zone_first_keg(uma_zone_t zone) 3390095a784SJeff Roberson { 340af526374SJeff Roberson uma_klink_t klink; 3410095a784SJeff Roberson 342af526374SJeff Roberson klink = LIST_FIRST(&zone->uz_kegs); 343af526374SJeff Roberson return (klink != NULL) ? klink->kl_keg : NULL; 3440095a784SJeff Roberson } 3450095a784SJeff Roberson 3465e4bb93cSKip Macy #undef UMA_ALIGN 3475e4bb93cSKip Macy 348af17e9a9SRobert Watson #ifdef _KERNEL 3498355f576SJeff Roberson /* Internal prototypes */ 35085dcf349SGleb Smirnoff static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data); 3518355f576SJeff Roberson void *uma_large_malloc(int size, int wait); 3528355f576SJeff Roberson void uma_large_free(uma_slab_t slab); 3538355f576SJeff Roberson 3548355f576SJeff Roberson /* Lock Macros */ 3558355f576SJeff Roberson 356e20a199fSJeff Roberson #define KEG_LOCK_INIT(k, lc) \ 35728bc4419SJeff Roberson do { \ 35828bc4419SJeff Roberson if ((lc)) \ 359e20a199fSJeff Roberson mtx_init(&(k)->uk_lock, (k)->uk_name, \ 360e20a199fSJeff Roberson (k)->uk_name, MTX_DEF | MTX_DUPOK); \ 36128bc4419SJeff Roberson else \ 362e20a199fSJeff Roberson mtx_init(&(k)->uk_lock, (k)->uk_name, \ 36328bc4419SJeff Roberson "UMA zone", MTX_DEF | MTX_DUPOK); \ 36428bc4419SJeff Roberson } while (0) 36528bc4419SJeff Roberson 366e20a199fSJeff Roberson #define KEG_LOCK_FINI(k) mtx_destroy(&(k)->uk_lock) 367e20a199fSJeff Roberson #define KEG_LOCK(k) mtx_lock(&(k)->uk_lock) 368e20a199fSJeff Roberson #define KEG_UNLOCK(k) mtx_unlock(&(k)->uk_lock) 369af526374SJeff Roberson 370af526374SJeff Roberson #define ZONE_LOCK_INIT(z, lc) \ 371af526374SJeff Roberson do { \ 372af526374SJeff Roberson if ((lc)) \ 373af526374SJeff Roberson mtx_init(&(z)->uz_lock, (z)->uz_name, \ 374af526374SJeff Roberson (z)->uz_name, MTX_DEF | MTX_DUPOK); \ 375af526374SJeff Roberson else \ 376af526374SJeff Roberson mtx_init(&(z)->uz_lock, (z)->uz_name, \ 377af526374SJeff Roberson "UMA zone", MTX_DEF | MTX_DUPOK); \ 378af526374SJeff Roberson } while (0) 379af526374SJeff Roberson 380af526374SJeff Roberson #define ZONE_LOCK(z) mtx_lock((z)->uz_lockptr) 381af526374SJeff Roberson #define ZONE_TRYLOCK(z) mtx_trylock((z)->uz_lockptr) 382af526374SJeff Roberson #define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lockptr) 383af526374SJeff Roberson #define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock) 3848355f576SJeff Roberson 3858355f576SJeff Roberson /* 3868355f576SJeff Roberson * Find a slab within a hash table. This is used for OFFPAGE zones to lookup 3878355f576SJeff Roberson * the slab structure. 3888355f576SJeff Roberson * 3898355f576SJeff Roberson * Arguments: 3908355f576SJeff Roberson * hash The hash table to search. 3918355f576SJeff Roberson * data The base page of the item. 3928355f576SJeff Roberson * 3938355f576SJeff Roberson * Returns: 3948355f576SJeff Roberson * A pointer to a slab if successful, else NULL. 3958355f576SJeff Roberson */ 3968355f576SJeff Roberson static __inline uma_slab_t 39785dcf349SGleb Smirnoff hash_sfind(struct uma_hash *hash, uint8_t *data) 3988355f576SJeff Roberson { 3998355f576SJeff Roberson uma_slab_t slab; 4008355f576SJeff Roberson int hval; 4018355f576SJeff Roberson 4028355f576SJeff Roberson hval = UMA_HASH(hash, data); 4038355f576SJeff Roberson 4048355f576SJeff Roberson SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) { 40585dcf349SGleb Smirnoff if ((uint8_t *)slab->us_data == data) 4068355f576SJeff Roberson return (slab); 4078355f576SJeff Roberson } 4088355f576SJeff Roberson return (NULL); 4098355f576SJeff Roberson } 4108355f576SJeff Roberson 41199571dc3SJeff Roberson static __inline uma_slab_t 41299571dc3SJeff Roberson vtoslab(vm_offset_t va) 41399571dc3SJeff Roberson { 41499571dc3SJeff Roberson vm_page_t p; 41599571dc3SJeff Roberson uma_slab_t slab; 41699571dc3SJeff Roberson 41799571dc3SJeff Roberson p = PHYS_TO_VM_PAGE(pmap_kextract(va)); 41899571dc3SJeff Roberson slab = (uma_slab_t )p->object; 41999571dc3SJeff Roberson 42099571dc3SJeff Roberson if (p->flags & PG_SLAB) 42199571dc3SJeff Roberson return (slab); 42299571dc3SJeff Roberson else 42399571dc3SJeff Roberson return (NULL); 42499571dc3SJeff Roberson } 42599571dc3SJeff Roberson 42699571dc3SJeff Roberson static __inline void 42799571dc3SJeff Roberson vsetslab(vm_offset_t va, uma_slab_t slab) 42899571dc3SJeff Roberson { 42999571dc3SJeff Roberson vm_page_t p; 43099571dc3SJeff Roberson 4316fc96493SOlivier Houchard p = PHYS_TO_VM_PAGE(pmap_kextract(va)); 43299571dc3SJeff Roberson p->object = (vm_object_t)slab; 43399571dc3SJeff Roberson p->flags |= PG_SLAB; 43499571dc3SJeff Roberson } 43599571dc3SJeff Roberson 43699571dc3SJeff Roberson static __inline void 43799571dc3SJeff Roberson vsetobj(vm_offset_t va, vm_object_t obj) 43899571dc3SJeff Roberson { 43999571dc3SJeff Roberson vm_page_t p; 44099571dc3SJeff Roberson 4416fc96493SOlivier Houchard p = PHYS_TO_VM_PAGE(pmap_kextract(va)); 44299571dc3SJeff Roberson p->object = obj; 44399571dc3SJeff Roberson p->flags &= ~PG_SLAB; 44499571dc3SJeff Roberson } 4458355f576SJeff Roberson 44648eea375SJeff Roberson /* 44748eea375SJeff Roberson * The following two functions may be defined by architecture specific code 44848eea375SJeff Roberson * if they can provide more effecient allocation functions. This is useful 44948eea375SJeff Roberson * for using direct mapped addresses. 45048eea375SJeff Roberson */ 45185dcf349SGleb Smirnoff void *uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait); 45285dcf349SGleb Smirnoff void uma_small_free(void *mem, int size, uint8_t flags); 453af17e9a9SRobert Watson #endif /* _KERNEL */ 45448eea375SJeff Roberson 4558355f576SJeff Roberson #endif /* VM_UMA_INT_H */ 456