/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Zones contain lists of slabs which are stored in either the full bin,
 * empty bin, or partially allocated bin, to reduce fragmentation.  They
 * also contain the user-supplied value for size, which is adjusted for
 * alignment purposes; rsize is the result of that adjustment.  The zone
 * also stores information for managing a hash of page addresses that maps
 * pages to uma_slab_t structures for pages that don't have embedded
 * uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it
 * may be allocated off the page from a special slab zone.  The free list
 * within a slab is managed with a linked list of indexes, which are 8-bit
 * values.  If UMA_SLAB_SIZE is defined to be too large I will have to
 * switch to 16-bit values.
 * Currently on alpha you can get 250 or so 32-byte items and on x86 you
 * can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if
 * this will improve the number of items per slab that fit.
 *
 * Other potential space optimizations are storing the 8-bit linkage indexes
 * in the space wasted between items due to alignment problems.  This may
 * yield a much better memory footprint for certain sizes of objects.
 * Another alternative is to increase the UMA_SLAB_SIZE, or allow for
 * dynamic slab sizes.  I prefer dynamic slab sizes because we could stick
 * with 8-bit indexes and only use large slab sizes for zones with a lot of
 * waste per slab.  This may create inefficiencies in the vm subsystem due
 * to fragmentation in the address space.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50%
 * waste, so you fall back to the memory footprint of the power of two
 * allocator.  I have looked at memory allocation sizes on many of the
 * machines available to me, and there does not seem to be an abundance of
 * allocations in this range, so at this time it may not make sense to
 * optimize for it.  This can, of course, be solved with dynamic slab sizes.
 *
 */
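
/*
 * Illustrative sketch (compiled out with #if 0, not part of UMA itself):
 * how the user-supplied size and the zone's alignment mask relate to rsize
 * as described above.  The function name and parameters are hypothetical;
 * the real adjustment is performed in uma_core.c when a zone is created.
 * For example, a 52 byte item with an alignment mask of 7 (8 byte
 * alignment) yields an rsize of 56.
 */
#if 0
static __inline int
uma_rsize_example(int size, int align_mask)
{
	/* Round size up to the next multiple of (align_mask + 1). */
	return ((size + align_mask) & ~align_mask);
}
#endif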

/*
 * This is the layout of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 *     |___________________________________________________________|
 *	 ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		30	/* Number of pages allocated for startup */
#define UMA_WORKING_TIME	20	/* Seconds worth of items to keep */


/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)

/*
 * This is the initial size of the hash table for uma_slabs that are managed
 * off page.  I doubt there will be many cases where this is exceeded.  This
 * hash expands by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32


/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */
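
/*
 * Illustrative sketch (compiled out with #if 0, not part of UMA): the same
 * computation that the UMA_HASH() macro below performs, written out as a
 * function with a hypothetical name.  With 4K pages (UMA_SLAB_SHIFT == 12)
 * and a 32 bucket table (hashmask == 31), the page address 0xc1234000
 * lands in bucket (0xc1234000 >> 12) & 31 == 20.
 */
#if 0
static __inline int
uma_hash_example(void *mem, int hashmask)
{
	/* Discard the page offset bits, then fold into the table size. */
	return ((((unsigned long)mem) >> UMA_SLAB_SHIFT) & hashmask);
}
#endif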

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink);

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab {
	uma_zone_t	us_zone;		/* Zone we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long		_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
	u_int8_t	us_freelist[1];		/* Free list (actually larger) */
};

#define us_link	us_type._us_link
#define us_size	us_type._us_size

typedef struct uma_slab * uma_slab_t;
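
/*
 * Illustrative sketch (compiled out with #if 0, not part of UMA): one way
 * to recover the slab header that is embedded at the end of a non-OFFPAGE
 * slab, as pictured in the layout diagram above.  The function name is
 * hypothetical, and uz_pgoff, the byte offset of the header within the
 * slab, lives in struct uma_zone further down in this file.  The real
 * lookup in uma_core.c also has to handle OFFPAGE and malloc slabs.
 */
#if 0
static __inline uma_slab_t
uma_embedded_slab_example(uma_zone_t zone, void *item)
{
	u_int8_t *mem;

	/* Mask the item address back to the start of its slab ... */
	mem = (u_int8_t *)((unsigned long)item & ~UMA_SLAB_MASK);
	/* ... and step over the items to the embedded header. */
	return ((uma_slab_t)(mem + zone->uz_pgoff));
}
#endif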

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;
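
/*
 * Illustrative sketch (compiled out with #if 0, not part of UMA): the heart
 * of the per-cpu fast allocation path using the structures above -- pop the
 * last item off uc_allocbucket when one is available.  The function name is
 * hypothetical; the real code in uma_core.c also swaps buckets, runs
 * constructors, and holds the per-cpu lock around this work.
 */
#if 0
static __inline void *
uma_cache_alloc_example(uma_cache_t cache)
{
	uma_bucket_t bucket;

	bucket = cache->uc_allocbucket;
	if (bucket == NULL || bucket->ub_cnt == 0)
		return (NULL);		/* Fall back to the zone. */
	bucket->ub_cnt--;
	cache->uc_allocs++;
	return (bucket->ub_bucket[bucket->ub_cnt]);
}
#endif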

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones */
	u_int32_t	uz_align;	/* Alignment mask */
	u_int32_t	uz_pages;	/* Total page count */

/* Used during alloc / free */
	struct mtx	uz_lock;	/* Lock for the zone */
	u_int32_t	uz_free;	/* Count of items free in slabs */
	u_int16_t	uz_ipers;	/* Items per slab */
	u_int16_t	uz_flags;	/* Internal flags */

	LIST_HEAD(,uma_slab)	uz_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uz_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uz_full_slab;	/* full slabs */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */
	u_int32_t	uz_size;	/* Requested size of each item */
	u_int32_t	uz_rsize;	/* Real size of each item */

	struct uma_hash	uz_hash;
	u_int16_t	uz_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uz_ppera;	/* pages per allocation from backend */
	u_int16_t	uz_cacheoff;	/* Next cache offset */
	u_int16_t	uz_cachemax;	/* Max cache offset */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	u_int64_t	uz_allocs;	/* Total number of allocations */

	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */
	uma_alloc	uz_allocf;	/* Allocation function */
	uma_free	uz_freef;	/* Free routine */
	struct vm_object	*uz_obj;	/* Zone specific object */
	vm_offset_t	uz_kva;		/* Base kva for zones with objs */
	u_int32_t	uz_maxpages;	/* Maximum number of pages to alloc */
	u_int64_t	uz_oallocs;	/* old allocs count */
	u_int64_t	uz_wssize;	/* Working set size */
	int		uz_recurse;	/* Allocation recursion count */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_cnt can have */
	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};
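
/*
 * Illustrative sketch (compiled out with #if 0, not part of UMA): why
 * uz_cpu[] must stay the last member.  The zone structure is over-allocated
 * so that the one-element array extends to one uma_cache per cpu; a
 * hypothetical helper computing that size would look like this.  The actual
 * sizing is done in uma_core.c at startup.
 */
#if 0
static __inline size_t
uma_zone_size_example(int ncpus)
{
	return (sizeof(struct uma_zone) +
	    (ncpus - 1) * sizeof(struct uma_cache));
}
#endif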

#define UMA_CACHE_INC	16	/* How much will we move data */

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_PRIVALLOC	0x1000	/* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL	0x2000	/* No offpage no PCPU. */
#define UMA_ZFLAG_FULL		0x4000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x8000	/* Don't ask VM for buckets. */

/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define	ZONE_LOCK(z)	mtx_lock(&(z)->uz_lock)
#define	ZONE_UNLOCK(z)	mtx_unlock(&(z)->uz_lock)

#define	CPU_LOCK_INIT(cpu)					\
	mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu",	\
	    MTX_DEF | MTX_DUPOK)

#define CPU_LOCK(cpu)						\
	mtx_lock(&uma_pcpu_mtx[(cpu)])

#define CPU_UNLOCK(cpu)						\
	mtx_unlock(&uma_pcpu_mtx[(cpu)])

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);

#endif /* VM_UMA_INT_H */