1.\"- 2.\" Copyright (c) 2001 Dag-Erling Coïdan Smørgrav 3.\" All rights reserved. 4.\" 5.\" Redistribution and use in source and binary forms, with or without 6.\" modification, are permitted provided that the following conditions 7.\" are met: 8.\" 1. Redistributions of source code must retain the above copyright 9.\" notice, this list of conditions and the following disclaimer. 10.\" 2. Redistributions in binary form must reproduce the above copyright 11.\" notice, this list of conditions and the following disclaimer in the 12.\" documentation and/or other materials provided with the distribution. 13.\" 14.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24.\" SUCH DAMAGE. 
25.\" 26.\" $FreeBSD$ 27.\" 28.Dd November 22, 2019 29.Dt UMA 9 30.Os 31.Sh NAME 32.Nm UMA 33.Nd general-purpose kernel object allocator 34.Sh SYNOPSIS 35.In sys/param.h 36.In sys/queue.h 37.In vm/uma.h 38.Cd "options UMA_FIRSTTOUCH" 39.Cd "options UMA_XDOMAIN" 40.Bd -literal 41typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags); 42typedef void (*uma_dtor)(void *mem, int size, void *arg); 43typedef int (*uma_init)(void *mem, int size, int flags); 44typedef void (*uma_fini)(void *mem, int size); 45typedef int (*uma_import)(void *arg, void **store, int count, int domain, 46 int flags); 47typedef void (*uma_release)(void *arg, void **store, int count); 48typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, int domain, 49 uint8_t *pflag, int wait); 50typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag); 51 52.Ed 53.Ft uma_zone_t 54.Fo uma_zcreate 55.Fa "char *name" "int size" 56.Fa "uma_ctor ctor" "uma_dtor dtor" "uma_init zinit" "uma_fini zfini" 57.Fa "int align" "uint16_t flags" 58.Fc 59.Ft uma_zone_t 60.Fo uma_zcache_create 61.Fa "char *name" "int size" 62.Fa "uma_ctor ctor" "uma_dtor dtor" "uma_init zinit" "uma_fini zfini" 63.Fa "uma_import zimport" "uma_release zrelease" 64.Fa "void *arg" "int flags" 65.Fc 66.Ft uma_zone_t 67.Fo uma_zsecond_create 68.Fa "char *name" 69.Fa "uma_ctor ctor" "uma_dtor dtor" "uma_init zinit" "uma_fini zfini" 70.Fa "uma_zone_t master" 71.Fc 72.Ft void 73.Fn uma_zdestroy "uma_zone_t zone" 74.Ft "void *" 75.Fn uma_zalloc "uma_zone_t zone" "int flags" 76.Ft "void *" 77.Fn uma_zalloc_arg "uma_zone_t zone" "void *arg" "int flags" 78.Ft "void *" 79.Fn uma_zalloc_domain "uma_zone_t zone" "void *arg" "int domain" "int flags" 80.Ft "void *" 81.Fn uma_zalloc_pcpu "uma_zone_t zone" "int flags" 82.Ft "void *" 83.Fn uma_zalloc_pcpu_arg "uma_zone_t zone" "void *arg" "int flags" 84.Ft void 85.Fn uma_zfree "uma_zone_t zone" "void *item" 86.Ft void 87.Fn uma_zfree_arg "uma_zone_t zone" "void *item" "void *arg" 88.Ft 
void 89.Fn uma_zfree_domain "uma_zone_t zone" "void *item" "void *arg" 90.Ft void 91.Fn uma_zfree_pcpu "uma_zone_t zone" "void *item" 92.Ft void 93.Fn uma_zfree_pcpu_arg "uma_zone_t zone" "void *item" "void *arg" 94.Ft void 95.Fn uma_prealloc "uma_zone_t zone" "int nitems" 96.Ft void 97.Fn uma_zone_reserve "uma_zone_t zone" "int nitems" 98.Ft void 99.Fn uma_zone_reserve_kva "uma_zone_t zone" "int nitems" 100.Ft void 101.Fn uma_reclaim "int req" 102.Ft void 103.Fn uma_zone_reclaim "uma_zone_t zone" "int req" 104.Ft void 105.Fn uma_zone_set_allocf "uma_zone_t zone" "uma_alloc allocf" 106.Ft void 107.Fn uma_zone_set_freef "uma_zone_t zone" "uma_free freef" 108.Ft int 109.Fn uma_zone_set_max "uma_zone_t zone" "int nitems" 110.Ft void 111.Fn uma_zone_set_maxcache "uma_zone_t zone" "int nitems" 112.Ft int 113.Fn uma_zone_get_max "uma_zone_t zone" 114.Ft int 115.Fn uma_zone_get_cur "uma_zone_t zone" 116.Ft void 117.Fn uma_zone_set_warning "uma_zone_t zone" "const char *warning" 118.Ft void 119.Fn uma_zone_set_maxaction "uma_zone_t zone" "void (*maxaction)(uma_zone_t)" 120.Ft void 121.Fn uma_reclaim 122.In sys/sysctl.h 123.Fn SYSCTL_UMA_MAX parent nbr name access zone descr 124.Fn SYSCTL_ADD_UMA_MAX ctx parent nbr name access zone descr 125.Fn SYSCTL_UMA_CUR parent nbr name access zone descr 126.Fn SYSCTL_ADD_UMA_CUR ctx parent nbr name access zone descr 127.Sh DESCRIPTION 128UMA (Universal Memory Allocator) provides an efficient interface for managing 129dynamically-sized collections of items of identical size, referred to as zones. 130Zones keep track of which items are in use and which 131are not, and UMA provides functions for allocating items from a zone and 132for releasing them back, making them available for subsequent allocation requests. 133Zones maintain per-CPU caches with linear scalability on SMP 134systems as well as round-robin and first-touch policies for NUMA 135systems. 
136The number of items cached per CPU is bounded, and each zone additionally 137maintains an unbounded cache of items that is used to quickly satisfy 138per-CPU cache allocation misses. 139.Pp 140Two types of zones exist: regular zones and cache zones. 141In a regular zone, items are allocated from a slab, which is one or more 142virtually contiguous memory pages that have been allocated from the kernel's 143page allocator. 144Internally, slabs are managed by a UMA keg, which is responsible for allocating 145slabs and keeping track of their usage by one or more zones. 146In typical usage, there is one keg per zone, so slabs are not shared among 147multiple zones. 148.Pp 149Regular zones import items from a keg, and release items back to that keg if 150requested. 151Cache zones do not have a keg, and instead use custom import and release 152methods. 153For example, some collections of kernel objects are statically allocated 154at boot-time, and the size of the collection does not change. 155A cache zone can be used to implement an efficient allocator for the objects in 156such a collection. 157.Pp 158The 159.Fn uma_zcreate 160and 161.Fn uma_zcache_create 162functions create a new regular zone and cache zone, respectively. 163The 164.Fn uma_zsecond_create 165function creates a regular zone which shares the keg of the zone 166specified by the 167.Fa master 168argument. 169The 170.Fa name 171argument is a text name of the zone for debugging and stats; this memory 172should not be freed until the zone has been deallocated. 173.Pp 174The 175.Fa ctor 176and 177.Fa dtor 178arguments are callback functions that are called by 179the UMA subsystem at the time of the call to 180.Fn uma_zalloc 181and 182.Fn uma_zfree 183respectively. 184Their purpose is to provide hooks for initializing or 185destroying things that need to be done at the time of the allocation 186or release of a resource. 
187A good usage for the 188.Fa ctor 189and 190.Fa dtor 191callbacks might be to initialize a data structure embedded in the item, 192such as a 193.Xr queue 3 194head. 195.Pp 196The 197.Fa zinit 198and 199.Fa zfini 200arguments are used to optimize the allocation of items from the zone. 201They are called by the UMA subsystem whenever 202it needs to allocate or free items to satisfy requests or memory pressure. 203A good use for the 204.Fa zinit 205and 206.Fa zfini 207callbacks might be to 208initialize and destroy a mutex contained within an item. 209This would allow one to avoid destroying and re-initializing the mutex 210each time the item is freed and re-allocated. 211They are not called on each call to 212.Fn uma_zalloc 213and 214.Fn uma_zfree 215but rather when an item is imported into a zone's cache, and when a zone 216releases an item to the slab allocator, typically as a response to memory 217pressure. 218.Pp 219For 220.Fn uma_zcache_create , 221the 222.Fa zimport 223and 224.Fa zrelease 225functions are called to import items into the zone and to release items 226from the zone, respectively. 227The 228.Fa zimport 229function should store pointers to items in the 230.Fa store 231array, which contains a maximum of 232.Fa count 233entries. 234The function must return the number of imported items, which may be less than 235the maximum. 236Similarly, the 237.Fa store 238parameter to the 239.Fa zrelease 240function contains an array of 241.Fa count 242pointers to items. 243The 244.Fa arg 245parameter passed to 246.Fn uma_zcache_create 247is provided to the import and release functions. 248The 249.Fa domain 250parameter to 251.Fa zimport 252specifies the requested 253.Xr numa 4 254domain for the allocation. 255It is either a NUMA domain number or the special value 256.Dv UMA_ANYDOMAIN . 
257.Pp 258The 259.Fa flags 260argument of 261.Fn uma_zcreate 262and 263.Fn uma_zcache_create 264is a subset of the following flags: 265.Bl -tag -width "foo" 266.It Dv UMA_ZONE_NOFREE 267Slabs allocated to the zone's keg are never freed. 268.It Dv UMA_ZONE_NODUMP 269Pages belonging to the zone will not be included in minidumps. 270.It Dv UMA_ZONE_PCPU 271An allocation from the zone will have 272.Va mp_ncpu 273shadow copies that are privately assigned to CPUs. 274A CPU can address its private copy using the base allocation address plus 275a multiple of the current CPU ID and 276.Fn sizeof "struct pcpu" : 277.Bd -literal -offset indent 278foo_zone = uma_zcreate(..., UMA_ZONE_PCPU); 279 ... 280foo_base = uma_zalloc(foo_zone, ...); 281 ... 282critical_enter(); 283foo_pcpu = (foo_t *)zpcpu_get(foo_base); 284/* do something with foo_pcpu */ 285critical_exit(); 286 287.Ed 288Note that 289.Dv M_ZERO 290cannot be used when allocating items from a PCPU zone. 291To obtain zeroed memory from a PCPU zone, use the 292.Fn uma_zalloc_pcpu 293function and its variants instead, and pass 294.Dv M_ZERO . 295.It Dv UMA_ZONE_OFFPAGE 296By default book-keeping of items within a slab is done in the slab page itself. 297This flag explicitly tells the subsystem that the book-keeping structure should 298be allocated separately, from a special internal zone. 299This flag requires either 300.Dv UMA_ZONE_VTOSLAB 301or 302.Dv UMA_ZONE_HASH , 303since the subsystem requires a mechanism to find the book-keeping structure 304for an item being freed. 305The subsystem may choose to prefer offpage book-keeping for certain zones 306implicitly. 307.It Dv UMA_ZONE_ZINIT 308The zone will have its 309.Ft uma_init 310method set to an internal method that initializes a newly allocated slab 311to all zeros. 312Do not mistake the 313.Ft uma_init 314method for 315.Ft uma_ctor . 316A zone with the 317.Dv UMA_ZONE_ZINIT 318flag will not return zeroed memory on every 319.Fn uma_zalloc . 
320.It Dv UMA_ZONE_HASH 321The zone should use an internal hash table to find slab book-keeping 322structure where an allocation being freed belongs to. 323.It Dv UMA_ZONE_VTOSLAB 324The zone should use special field of 325.Vt vm_page_t 326to find slab book-keeping structure where an allocation being freed belongs to. 327.It Dv UMA_ZONE_MALLOC 328The zone is for the 329.Xr malloc 9 330subsystem. 331.It Dv UMA_ZONE_VM 332The zone is for the VM subsystem. 333.It Dv UMA_ZONE_NUMA 334The zone should use a first-touch NUMA policy rather than the round-robin 335default. 336If the 337.Dv UMA_FIRSTTOUCH 338kernel option is configured, all zones implicitly use a first-touch policy, 339and the 340.Dv UMA_ZONE_NUMA 341flag has no effect. 342The 343.Dv UMA_XDOMAIN 344kernel option, when configured, causes UMA to do the extra tracking to ensure 345that allocations from first-touch zones are always local. 346Otherwise, consumers that do not free memory on the same domain from which it 347was allocated will cause mixing in per-CPU caches. 348See 349.Xr numa 4 350for more details. 351.El 352.Pp 353Zones can be destroyed using 354.Fn uma_zdestroy , 355freeing all memory that is cached in the zone. 356All items allocated from the zone must be freed to the zone before the zone 357may be safely destroyed. 358.Pp 359To allocate an item from a zone, simply call 360.Fn uma_zalloc 361with a pointer to that zone and set the 362.Fa flags 363argument to selected flags as documented in 364.Xr malloc 9 . 365It will return a pointer to an item if successful, or 366.Dv NULL 367in the rare case where all items in the zone are in use and the 368allocator is unable to grow the zone and 369.Dv M_NOWAIT 370is specified. 371.Pp 372Items are released back to the zone from which they were allocated by 373calling 374.Fn uma_zfree 375with a pointer to the zone and a pointer to the item. 376If 377.Fa item 378is 379.Dv NULL , 380then 381.Fn uma_zfree 382does nothing. 
383.Pp 384The variants 385.Fn uma_zalloc_arg 386and 387.Fn uma_zfree_arg 388allow callers to 389specify an argument for the 390.Fa ctor 391and 392.Fa dtor 393functions of the zone, respectively. 394The 395.Fn uma_zalloc_domain 396function allows callers to specify a fixed 397.Xr numa 4 398domain to allocate from. 399This uses a guaranteed but slow path in the allocator which reduces 400concurrency. 401The 402.Fn uma_zfree_domain 403function should be used to return memory allocated in this fashion. 404This function infers the domain from the pointer and does not require it as an 405argument. 406.Pp 407The 408.Fn uma_prealloc 409function allocates slabs for the requested number of items, typically following 410the initial creation of a zone. 411Subsequent allocations from the zone will be satisfied using the pre-allocated 412slabs. 413Note that slab allocation is performed with the 414.Dv M_WAITOK 415flag, so 416.Fn uma_prealloc 417may sleep. 418.Pp 419The 420.Fn uma_zone_reserve 421function sets the number of reserved items for the zone. 422.Fn uma_zalloc 423and variants will ensure that the zone contains at least the reserved number 424of free items. 425Reserved items may be allocated by specifying 426.Dv M_USE_RESERVE 427in the allocation request flags. 428.Fn uma_zone_reserve 429does not perform any pre-allocation by itself. 430.Pp 431The 432.Fn uma_zone_reserve_kva 433function pre-allocates kernel virtual address space for the requested 434number of items. 435Subsequent allocations from the zone will be satisfied using the pre-allocated 436address space. 437Note that unlike 438.Fn uma_zone_reserve , 439.Fn uma_zone_reserve_kva 440does not restrict the use of the pre-allocation to 441.Dv M_USE_RESERVE 442requests. 443.Pp 444The 445.Fn uma_reclaim 446and 447.Fn uma_zone_reclaim 448functions reclaim cached items from UMA zones, releasing unused memory. 
449The 450.Fn uma_reclaim 451function reclaims items from all regular zones, while 452.Fn uma_zone_reclaim 453reclaims items only from the specified zone. 454The 455.Fa req 456parameter must be one of three values which specify how aggressively 457items are to be reclaimed: 458.Bl -tag -width indent 459.It Dv UMA_RECLAIM_TRIM 460Reclaim items only in excess of the zone's estimated working set size. 461The working set size is periodically updated and tracks the recent history 462of the zone's usage. 463.It Dv UMA_RECLAIM_DRAIN 464Reclaim all items from the unbounded cache. 465Free items in the per-CPU caches are left alone. 466.It Dv UMA_RECLAIM_DRAIN_CPU 467Reclaim all cached items. 468.El 469.Pp 470The 471.Fn uma_zone_set_allocf 472and 473.Fn uma_zone_set_freef 474functions allow a zone's default slab allocation and free functions to be 475overridden. 476This is useful if the zone's items have special memory allocation constraints. 477For example, if multi-page objects are required to be physically contiguous, 478an 479.Fa allocf 480function which requests contiguous memory from the kernel's page allocator 481may be used. 482.Pp 483The 484.Fn uma_zone_set_max 485function limits the number of items 486.Pq and therefore memory 487that can be allocated to 488.Fa zone . 489The 490.Fa nitems 491argument specifies the requested upper limit number of items. 492The effective limit is returned to the caller, as it may end up being higher 493than requested due to the implementation rounding up to ensure all memory pages 494allocated to the zone are utilised to capacity. 495The limit applies to the total number of items in the zone, which includes 496allocated items, free items and free items in the per-cpu caches. 497On systems with more than one CPU it may not be possible to allocate 498the specified number of items even when there is no shortage of memory, 499because all of the remaining free items may be in the caches of the 500other CPUs when the limit is hit. 
501.Pp 502The 503.Fn uma_zone_set_maxcache 504function limits the number of free items which may be cached in the zone. 505This limit applies to both the per-CPU caches and the cache of free buckets. 506.Pp 507The 508.Fn uma_zone_get_max 509function returns the effective upper limit number of items for a zone. 510.Pp 511The 512.Fn uma_zone_get_cur 513function returns an approximation of the number of items currently allocated 514from the zone. 515The returned value is approximate because appropriate synchronisation to 516determine an exact value is not performed by the implementation. 517This ensures low overhead at the expense of potentially stale data being used 518in the calculation. 519.Pp 520The 521.Fn uma_zone_set_warning 522function sets a warning that will be printed on the system console when the 523given zone becomes full and fails to allocate an item. 524The warning will be printed no more often than every five minutes. 525Warnings can be turned off globally by setting the 526.Va vm.zone_warnings 527sysctl tunable to 528.Va 0 . 529.Pp 530The 531.Fn uma_zone_set_maxaction 532function sets a function that will be called when the given zone becomes full 533and fails to allocate an item. 534The function will be called with the zone locked. 535Also, the function 536that called the allocation function may hold additional locks. 537Therefore, 538this function should do very little work (similar to a signal handler). 539.Pp 540The 541.Fn SYSCTL_UMA_MAX parent nbr name access zone descr 542macro declares a static 543.Xr sysctl 9 544oid that exports the effective upper limit number of items for a zone. 545The 546.Fa zone 547argument should be a pointer to 548.Vt uma_zone_t . 549A read of the oid returns the value obtained through 550.Fn uma_zone_get_max . 551A write to the oid sets a new value via 552.Fn uma_zone_set_max . 553The 554.Fn SYSCTL_ADD_UMA_MAX ctx parent nbr name access zone descr 555macro is provided to create this type of oid dynamically. 
556.Pp 557The 558.Fn SYSCTL_UMA_CUR parent nbr name access zone descr 559macro declares a static read-only 560.Xr sysctl 9 561oid that exports the approximate current occupancy of the zone. 562The 563.Fa zone 564argument should be a pointer to 565.Vt uma_zone_t . 566A read of the oid returns the value obtained through 567.Fn uma_zone_get_cur . 568The 569.Fn SYSCTL_ADD_UMA_CUR ctx parent nbr name access zone descr 570macro is provided to create this type of oid dynamically. 571.Sh IMPLEMENTATION NOTES 572The memory that these allocation calls return is not executable. 573The 574.Fn uma_zalloc 575function does not support the 576.Dv M_EXEC 577flag to allocate executable memory. 578Not all platforms enforce a distinction between executable and 579non-executable memory. 580.Sh SEE ALSO 581.Xr numa 4 , 582.Xr vmstat 8 , 583.Xr malloc 9 584.Rs 585.%A Jeff Bonwick 586.%T "The Slab Allocator: An Object-Caching Kernel Memory Allocator" 587.%D 1994 588.Re 589.Sh HISTORY 590The zone allocator first appeared in 591.Fx 3.0 . 592It was radically changed in 593.Fx 5.0 594to function as a slab allocator. 595.Sh AUTHORS 596.An -nosplit 597The zone allocator was written by 598.An John S. Dyson . 599The zone allocator was rewritten in large parts by 600.An Jeff Roberson Aq Mt jeff@FreeBSD.org 601to function as a slab allocator. 602.Pp 603This manual page was written by 604.An Dag-Erling Sm\(/orgrav Aq Mt des@FreeBSD.org . 605Changes for UMA by 606.An Jeroen Ruigrok van der Werven Aq Mt asmodai@FreeBSD.org . 607