1fbf59bc9STejun Heo /*
288999a89STejun Heo  * mm/percpu.c - percpu memory allocator
3fbf59bc9STejun Heo  *
4fbf59bc9STejun Heo  * Copyright (C) 2009 SUSE Linux Products GmbH
5fbf59bc9STejun Heo  * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6fbf59bc9STejun Heo  *
79c015162SDennis Zhou (Facebook)  * This file is released under the GPLv2 license.
8fbf59bc9STejun Heo  *
99c015162SDennis Zhou (Facebook)  * The percpu allocator handles both static and dynamic areas. Percpu
109c015162SDennis Zhou (Facebook)  * areas are allocated in chunks which are divided into units. There is
119c015162SDennis Zhou (Facebook)  * a 1-to-1 mapping for units to possible cpus. These units are grouped
129c015162SDennis Zhou (Facebook)  * based on NUMA properties of the machine.
13fbf59bc9STejun Heo  *
14fbf59bc9STejun Heo  *  c0                           c1                         c2
15fbf59bc9STejun Heo  *  -------------------          -------------------        ------------
16fbf59bc9STejun Heo  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
17fbf59bc9STejun Heo  *  -------------------  ......  -------------------  ....  ------------
18fbf59bc9STejun Heo  *
199c015162SDennis Zhou (Facebook)  * Allocation is done by offsets into a unit's address space. Ie., an
209c015162SDennis Zhou (Facebook)  * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
219c015162SDennis Zhou (Facebook)  * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
229c015162SDennis Zhou (Facebook)  * and even sparse. Access is handled by configuring percpu base
239c015162SDennis Zhou (Facebook)  * registers according to the cpu to unit mappings and offsetting the
249c015162SDennis Zhou (Facebook)  * base address using pcpu_unit_size.
25fbf59bc9STejun Heo  *
269c015162SDennis Zhou (Facebook)  * There is special consideration for the first chunk which must handle
279c015162SDennis Zhou (Facebook)  * the static percpu variables in the kernel image as allocation services
289c015162SDennis Zhou (Facebook)  * are not online yet. In short, the first chunk is structured like so:
299c015162SDennis Zhou (Facebook)  *
309c015162SDennis Zhou (Facebook)  *  <Static | [Reserved] | Dynamic>
319c015162SDennis Zhou (Facebook)  *
329c015162SDennis Zhou (Facebook)  * The static data is copied from the original section managed by the
339c015162SDennis Zhou (Facebook)  * linker. The reserved section, if non-zero, primarily manages static
349c015162SDennis Zhou (Facebook)  * percpu variables from kernel modules. Finally, the dynamic section
359c015162SDennis Zhou (Facebook)  * takes care of normal allocations.
36fbf59bc9STejun Heo  *
37fbf59bc9STejun Heo  * Allocation state in each chunk is kept using an array of integers
38fbf59bc9STejun Heo  * on chunk->map. A positive value in the map represents a free
39fbf59bc9STejun Heo  * region and negative allocated. Allocation inside a chunk is done
40fbf59bc9STejun Heo  * by scanning this map sequentially and serving the first matching
41fbf59bc9STejun Heo  * entry. This is mostly copied from the percpu_modalloc() allocator.
42e1b9aa3fSChristoph Lameter  * Chunks can be determined from the address using the index field
43e1b9aa3fSChristoph Lameter  * in the page struct. The index field contains a pointer to the chunk.
44fbf59bc9STejun Heo  *
459c015162SDennis Zhou (Facebook)  * These chunks are organized into lists according to free_size and the
469c015162SDennis Zhou (Facebook)  * allocator tries to allocate from the fullest chunk first.
Each chunk maintains 479c015162SDennis Zhou (Facebook) * a maximum contiguous area size hint which is guaranteed to be equal 489c015162SDennis Zhou (Facebook) * to or larger than the maximum contiguous area in the chunk. This 499c015162SDennis Zhou (Facebook) * helps prevent the allocator from iterating over chunks unnecessarily. 509c015162SDennis Zhou (Facebook) * 514091fb95SMasahiro Yamada * To use this allocator, arch code should do the following: 52fbf59bc9STejun Heo * 53fbf59bc9STejun Heo * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 54e0100983STejun Heo * regular address to percpu pointer and back if they need to be 55e0100983STejun Heo * different from the default 56fbf59bc9STejun Heo * 578d408b4bSTejun Heo * - use pcpu_setup_first_chunk() during percpu area initialization to 588d408b4bSTejun Heo * setup the first chunk containing the kernel static percpu area 59fbf59bc9STejun Heo */ 60fbf59bc9STejun Heo 61870d4b12SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 62870d4b12SJoe Perches 63fbf59bc9STejun Heo #include <linux/bitmap.h> 64fbf59bc9STejun Heo #include <linux/bootmem.h> 65fd1e8a1fSTejun Heo #include <linux/err.h> 66ca460b3cSDennis Zhou (Facebook) #include <linux/lcm.h> 67fbf59bc9STejun Heo #include <linux/list.h> 68a530b795STejun Heo #include <linux/log2.h> 69fbf59bc9STejun Heo #include <linux/mm.h> 70fbf59bc9STejun Heo #include <linux/module.h> 71fbf59bc9STejun Heo #include <linux/mutex.h> 72fbf59bc9STejun Heo #include <linux/percpu.h> 73fbf59bc9STejun Heo #include <linux/pfn.h> 74fbf59bc9STejun Heo #include <linux/slab.h> 75ccea34b5STejun Heo #include <linux/spinlock.h> 76fbf59bc9STejun Heo #include <linux/vmalloc.h> 77a56dbddfSTejun Heo #include <linux/workqueue.h> 78f528f0b8SCatalin Marinas #include <linux/kmemleak.h> 79fbf59bc9STejun Heo 80fbf59bc9STejun Heo #include <asm/cacheflush.h> 81e0100983STejun Heo #include <asm/sections.h> 82fbf59bc9STejun Heo #include <asm/tlbflush.h> 833b034b0dSVivek Goyal #include <asm/io.h> 84fbf59bc9STejun Heo 85df95e795SDennis Zhou #define CREATE_TRACE_POINTS 86df95e795SDennis Zhou #include <trace/events/percpu.h> 87df95e795SDennis Zhou 888fa3ed80SDennis Zhou #include "percpu-internal.h" 898fa3ed80SDennis Zhou 9040064aecSDennis Zhou (Facebook) /* the slots are sorted by free bytes left, 1-31 bytes share the same slot */ 9140064aecSDennis Zhou (Facebook) #define PCPU_SLOT_BASE_SHIFT 5 9240064aecSDennis Zhou (Facebook) 931a4d7607STejun Heo #define PCPU_EMPTY_POP_PAGES_LOW 2 941a4d7607STejun Heo #define PCPU_EMPTY_POP_PAGES_HIGH 4 95fbf59bc9STejun Heo 96bbddff05STejun Heo #ifdef CONFIG_SMP 97e0100983STejun Heo /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 98e0100983STejun Heo #ifndef __addr_to_pcpu_ptr 99e0100983STejun Heo #define __addr_to_pcpu_ptr(addr) \ 10043cf38ebSTejun Heo (void __percpu *)((unsigned long)(addr) - \ 10143cf38ebSTejun Heo (unsigned long)pcpu_base_addr + \ 10243cf38ebSTejun Heo (unsigned long)__per_cpu_start) 103e0100983STejun Heo #endif 104e0100983STejun Heo #ifndef __pcpu_ptr_to_addr 105e0100983STejun Heo #define __pcpu_ptr_to_addr(ptr) \ 10643cf38ebSTejun Heo (void __force *)((unsigned long)(ptr) + \ 10743cf38ebSTejun Heo (unsigned long)pcpu_base_addr - \ 10843cf38ebSTejun Heo (unsigned long)__per_cpu_start) 109e0100983STejun Heo #endif 110bbddff05STejun Heo #else /* CONFIG_SMP */ 111bbddff05STejun Heo /* on UP, it's always identity mapped */ 112bbddff05STejun Heo #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) 113bbddff05STejun Heo #define 
__pcpu_ptr_to_addr(ptr) (void __force *)(ptr) 114bbddff05STejun Heo #endif /* CONFIG_SMP */ 115e0100983STejun Heo 1161328710bSDaniel Micay static int pcpu_unit_pages __ro_after_init; 1171328710bSDaniel Micay static int pcpu_unit_size __ro_after_init; 1181328710bSDaniel Micay static int pcpu_nr_units __ro_after_init; 1191328710bSDaniel Micay static int pcpu_atom_size __ro_after_init; 1208fa3ed80SDennis Zhou int pcpu_nr_slots __ro_after_init; 1211328710bSDaniel Micay static size_t pcpu_chunk_struct_size __ro_after_init; 122fbf59bc9STejun Heo 123a855b84cSTejun Heo /* cpus with the lowest and highest unit addresses */ 1241328710bSDaniel Micay static unsigned int pcpu_low_unit_cpu __ro_after_init; 1251328710bSDaniel Micay static unsigned int pcpu_high_unit_cpu __ro_after_init; 1262f39e637STejun Heo 127fbf59bc9STejun Heo /* the address of the first chunk which starts with the kernel static area */ 1281328710bSDaniel Micay void *pcpu_base_addr __ro_after_init; 129fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(pcpu_base_addr); 130fbf59bc9STejun Heo 1311328710bSDaniel Micay static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */ 1321328710bSDaniel Micay const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */ 1332f39e637STejun Heo 1346563297cSTejun Heo /* group information, used for vm allocation */ 1351328710bSDaniel Micay static int pcpu_nr_groups __ro_after_init; 1361328710bSDaniel Micay static const unsigned long *pcpu_group_offsets __ro_after_init; 1371328710bSDaniel Micay static const size_t *pcpu_group_sizes __ro_after_init; 1386563297cSTejun Heo 139ae9e6bc9STejun Heo /* 140ae9e6bc9STejun Heo * The first chunk which always exists. Note that unlike other 141ae9e6bc9STejun Heo * chunks, this one can be allocated and mapped in several different 142ae9e6bc9STejun Heo * ways and thus often doesn't live in the vmalloc area. 143ae9e6bc9STejun Heo */ 1448fa3ed80SDennis Zhou struct pcpu_chunk *pcpu_first_chunk __ro_after_init; 145ae9e6bc9STejun Heo 146ae9e6bc9STejun Heo /* 147ae9e6bc9STejun Heo * Optional reserved chunk. This chunk reserves part of the first 148e2266705SDennis Zhou (Facebook) * chunk and serves it for reserved allocations. When the reserved 149e2266705SDennis Zhou (Facebook) * region doesn't exist, the following variable is NULL. 150ae9e6bc9STejun Heo */ 1518fa3ed80SDennis Zhou struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init; 152edcb4639STejun Heo 1538fa3ed80SDennis Zhou DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ 1546710e594STejun Heo static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ 155fbf59bc9STejun Heo 1568fa3ed80SDennis Zhou struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */ 157fbf59bc9STejun Heo 1584f996e23STejun Heo /* chunks which need their map areas extended, protected by pcpu_lock */ 1594f996e23STejun Heo static LIST_HEAD(pcpu_map_extend_chunks); 1604f996e23STejun Heo 161b539b87fSTejun Heo /* 162b539b87fSTejun Heo * The number of empty populated pages, protected by pcpu_lock. The 163b539b87fSTejun Heo * reserved chunk doesn't contribute to the count. 164b539b87fSTejun Heo */ 1656b9b6f39SDennis Zhou (Facebook) int pcpu_nr_empty_pop_pages; 166b539b87fSTejun Heo 1671a4d7607STejun Heo /* 1681a4d7607STejun Heo * Balance work is used to populate or destroy chunks asynchronously. 
We 1691a4d7607STejun Heo * try to keep the number of populated free pages between 1701a4d7607STejun Heo * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one 1711a4d7607STejun Heo * empty chunk. 1721a4d7607STejun Heo */ 173fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work); 174fe6bd8c3STejun Heo static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); 1751a4d7607STejun Heo static bool pcpu_async_enabled __read_mostly; 1761a4d7607STejun Heo static bool pcpu_atomic_alloc_failed; 1771a4d7607STejun Heo 1781a4d7607STejun Heo static void pcpu_schedule_balance_work(void) 1791a4d7607STejun Heo { 1801a4d7607STejun Heo if (pcpu_async_enabled) 1811a4d7607STejun Heo schedule_work(&pcpu_balance_work); 1821a4d7607STejun Heo } 183a56dbddfSTejun Heo 184c0ebfdc3SDennis Zhou (Facebook) /** 185560f2c23SDennis Zhou (Facebook) * pcpu_addr_in_chunk - check if the address is served from this chunk 186560f2c23SDennis Zhou (Facebook) * @chunk: chunk of interest 187560f2c23SDennis Zhou (Facebook) * @addr: percpu address 188c0ebfdc3SDennis Zhou (Facebook) * 189c0ebfdc3SDennis Zhou (Facebook) * RETURNS: 190560f2c23SDennis Zhou (Facebook) * True if the address is served from this chunk. 191c0ebfdc3SDennis Zhou (Facebook) */ 192560f2c23SDennis Zhou (Facebook) static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) 193020ec653STejun Heo { 194c0ebfdc3SDennis Zhou (Facebook) void *start_addr, *end_addr; 195020ec653STejun Heo 196560f2c23SDennis Zhou (Facebook) if (!chunk) 197c0ebfdc3SDennis Zhou (Facebook) return false; 198c0ebfdc3SDennis Zhou (Facebook) 199560f2c23SDennis Zhou (Facebook) start_addr = chunk->base_addr + chunk->start_offset; 200560f2c23SDennis Zhou (Facebook) end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - 201560f2c23SDennis Zhou (Facebook) chunk->end_offset; 202c0ebfdc3SDennis Zhou (Facebook) 203c0ebfdc3SDennis Zhou (Facebook) return addr >= start_addr && addr < end_addr; 204020ec653STejun Heo } 205020ec653STejun Heo 206d9b55eebSTejun Heo static int __pcpu_size_to_slot(int size) 207fbf59bc9STejun Heo { 208cae3aeb8STejun Heo int highbit = fls(size); /* size is in bytes */ 209fbf59bc9STejun Heo return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 210fbf59bc9STejun Heo } 211fbf59bc9STejun Heo 212d9b55eebSTejun Heo static int pcpu_size_to_slot(int size) 213d9b55eebSTejun Heo { 214d9b55eebSTejun Heo if (size == pcpu_unit_size) 215d9b55eebSTejun Heo return pcpu_nr_slots - 1; 216d9b55eebSTejun Heo return __pcpu_size_to_slot(size); 217d9b55eebSTejun Heo } 218d9b55eebSTejun Heo 219fbf59bc9STejun Heo static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 220fbf59bc9STejun Heo { 22140064aecSDennis Zhou (Facebook) if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk->contig_bits == 0) 222fbf59bc9STejun Heo return 0; 223fbf59bc9STejun Heo 22440064aecSDennis Zhou (Facebook) return pcpu_size_to_slot(chunk->free_bytes); 225fbf59bc9STejun Heo } 226fbf59bc9STejun Heo 22788999a89STejun Heo /* set the pointer to a chunk in a page struct */ 22888999a89STejun Heo static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 22988999a89STejun Heo { 23088999a89STejun Heo page->index = (unsigned long)pcpu; 23188999a89STejun Heo } 23288999a89STejun Heo 23388999a89STejun Heo /* obtain pointer to a chunk from a page struct */ 23488999a89STejun Heo static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 23588999a89STejun Heo { 23688999a89STejun Heo return (struct pcpu_chunk *)page->index; 23788999a89STejun Heo } 23888999a89STejun Heo 
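/*
 * As a rough illustration of the slot sizing helpers above (the sizes
 * here are arbitrary examples, not taken from any particular
 * configuration): with PCPU_SLOT_BASE_SHIFT == 5,
 *
 *	__pcpu_size_to_slot(96)   == max(fls(96)   - 5 + 2, 1) == 4
 *	__pcpu_size_to_slot(1024) == max(fls(1024) - 5 + 2, 1) == 8
 *
 * pcpu_size_to_slot() pins a completely free chunk (free_bytes ==
 * pcpu_unit_size) to the last slot, pcpu_nr_slots - 1, and
 * pcpu_chunk_slot() returns 0 for a chunk that cannot satisfy even a
 * PCPU_MIN_ALLOC_SIZE request.
 */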
23988999a89STejun Heo static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) 240fbf59bc9STejun Heo { 2412f39e637STejun Heo return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 242fbf59bc9STejun Heo } 243fbf59bc9STejun Heo 244c0ebfdc3SDennis Zhou (Facebook) static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx) 245c0ebfdc3SDennis Zhou (Facebook) { 246c0ebfdc3SDennis Zhou (Facebook) return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); 247c0ebfdc3SDennis Zhou (Facebook) } 248c0ebfdc3SDennis Zhou (Facebook) 2499983b6f0STejun Heo static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 250fbf59bc9STejun Heo unsigned int cpu, int page_idx) 251fbf59bc9STejun Heo { 252c0ebfdc3SDennis Zhou (Facebook) return (unsigned long)chunk->base_addr + 253c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(cpu, page_idx); 254fbf59bc9STejun Heo } 255fbf59bc9STejun Heo 25691e914c5SDennis Zhou (Facebook) static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end) 257ce3141a2STejun Heo { 25891e914c5SDennis Zhou (Facebook) *rs = find_next_zero_bit(bitmap, end, *rs); 25991e914c5SDennis Zhou (Facebook) *re = find_next_bit(bitmap, end, *rs + 1); 260ce3141a2STejun Heo } 261ce3141a2STejun Heo 26291e914c5SDennis Zhou (Facebook) static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end) 263ce3141a2STejun Heo { 26491e914c5SDennis Zhou (Facebook) *rs = find_next_bit(bitmap, end, *rs); 26591e914c5SDennis Zhou (Facebook) *re = find_next_zero_bit(bitmap, end, *rs + 1); 266ce3141a2STejun Heo } 267ce3141a2STejun Heo 268ce3141a2STejun Heo /* 26991e914c5SDennis Zhou (Facebook) * Bitmap region iterators. Iterates over the bitmap between 27091e914c5SDennis Zhou (Facebook) * [@start, @end) in @chunk. @rs and @re should be integer variables 27191e914c5SDennis Zhou (Facebook) * and will be set to start and end index of the current free region. 272ce3141a2STejun Heo */ 27391e914c5SDennis Zhou (Facebook) #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ 27491e914c5SDennis Zhou (Facebook) for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \ 275ce3141a2STejun Heo (rs) < (re); \ 27691e914c5SDennis Zhou (Facebook) (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end))) 277ce3141a2STejun Heo 27891e914c5SDennis Zhou (Facebook) #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ 27991e914c5SDennis Zhou (Facebook) for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \ 280ce3141a2STejun Heo (rs) < (re); \ 28191e914c5SDennis Zhou (Facebook) (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end))) 282ce3141a2STejun Heo 283ca460b3cSDennis Zhou (Facebook) /* 284ca460b3cSDennis Zhou (Facebook) * The following are helper functions to help access bitmaps and convert 285ca460b3cSDennis Zhou (Facebook) * between bitmap offsets to address offsets. 
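 *
 * As an illustrative example (assuming PCPU_BITMAP_BLOCK_BITS of 1024,
 * i.e. PAGE_SIZE / PCPU_MIN_ALLOC_SIZE on a 4K-page configuration), a
 * chunk offset of 2500 bits maps to block index 2500 / 1024 == 2 and
 * block offset 2500 & 1023 == 452, and pcpu_block_off_to_off(2, 452)
 * recovers 2 * 1024 + 452 == 2500.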
286ca460b3cSDennis Zhou (Facebook) */ 287ca460b3cSDennis Zhou (Facebook) static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) 288ca460b3cSDennis Zhou (Facebook) { 289ca460b3cSDennis Zhou (Facebook) return chunk->alloc_map + 290ca460b3cSDennis Zhou (Facebook) (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); 291ca460b3cSDennis Zhou (Facebook) } 292ca460b3cSDennis Zhou (Facebook) 293ca460b3cSDennis Zhou (Facebook) static unsigned long pcpu_off_to_block_index(int off) 294ca460b3cSDennis Zhou (Facebook) { 295ca460b3cSDennis Zhou (Facebook) return off / PCPU_BITMAP_BLOCK_BITS; 296ca460b3cSDennis Zhou (Facebook) } 297ca460b3cSDennis Zhou (Facebook) 298ca460b3cSDennis Zhou (Facebook) static unsigned long pcpu_off_to_block_off(int off) 299ca460b3cSDennis Zhou (Facebook) { 300ca460b3cSDennis Zhou (Facebook) return off & (PCPU_BITMAP_BLOCK_BITS - 1); 301ca460b3cSDennis Zhou (Facebook) } 302ca460b3cSDennis Zhou (Facebook) 303b185cd0dSDennis Zhou (Facebook) static unsigned long pcpu_block_off_to_off(int index, int off) 304b185cd0dSDennis Zhou (Facebook) { 305b185cd0dSDennis Zhou (Facebook) return index * PCPU_BITMAP_BLOCK_BITS + off; 306b185cd0dSDennis Zhou (Facebook) } 307b185cd0dSDennis Zhou (Facebook) 308fbf59bc9STejun Heo /** 309525ca84dSDennis Zhou (Facebook) * pcpu_next_md_free_region - finds the next hint free area 310525ca84dSDennis Zhou (Facebook) * @chunk: chunk of interest 311525ca84dSDennis Zhou (Facebook) * @bit_off: chunk offset 312525ca84dSDennis Zhou (Facebook) * @bits: size of free area 313525ca84dSDennis Zhou (Facebook) * 314525ca84dSDennis Zhou (Facebook) * Helper function for pcpu_for_each_md_free_region. It checks 315525ca84dSDennis Zhou (Facebook) * block->contig_hint and performs aggregation across blocks to find the 316525ca84dSDennis Zhou (Facebook) * next hint. It modifies bit_off and bits in-place to be consumed in the 317525ca84dSDennis Zhou (Facebook) * loop. 318525ca84dSDennis Zhou (Facebook) */ 319525ca84dSDennis Zhou (Facebook) static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, 320525ca84dSDennis Zhou (Facebook) int *bits) 321525ca84dSDennis Zhou (Facebook) { 322525ca84dSDennis Zhou (Facebook) int i = pcpu_off_to_block_index(*bit_off); 323525ca84dSDennis Zhou (Facebook) int block_off = pcpu_off_to_block_off(*bit_off); 324525ca84dSDennis Zhou (Facebook) struct pcpu_block_md *block; 325525ca84dSDennis Zhou (Facebook) 326525ca84dSDennis Zhou (Facebook) *bits = 0; 327525ca84dSDennis Zhou (Facebook) for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); 328525ca84dSDennis Zhou (Facebook) block++, i++) { 329525ca84dSDennis Zhou (Facebook) /* handles contig area across blocks */ 330525ca84dSDennis Zhou (Facebook) if (*bits) { 331525ca84dSDennis Zhou (Facebook) *bits += block->left_free; 332525ca84dSDennis Zhou (Facebook) if (block->left_free == PCPU_BITMAP_BLOCK_BITS) 333525ca84dSDennis Zhou (Facebook) continue; 334525ca84dSDennis Zhou (Facebook) return; 335525ca84dSDennis Zhou (Facebook) } 336525ca84dSDennis Zhou (Facebook) 337525ca84dSDennis Zhou (Facebook) /* 338525ca84dSDennis Zhou (Facebook) * This checks three things. First is there a contig_hint to 339525ca84dSDennis Zhou (Facebook) * check. Second, have we checked this hint before by 340525ca84dSDennis Zhou (Facebook) * comparing the block_off. Third, is this the same as the 341525ca84dSDennis Zhou (Facebook) * right contig hint. 
In the last case, it spills over into 342525ca84dSDennis Zhou (Facebook) * the next block and should be handled by the contig area 343525ca84dSDennis Zhou (Facebook) * across blocks code. 344525ca84dSDennis Zhou (Facebook) */ 345525ca84dSDennis Zhou (Facebook) *bits = block->contig_hint; 346525ca84dSDennis Zhou (Facebook) if (*bits && block->contig_hint_start >= block_off && 347525ca84dSDennis Zhou (Facebook) *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) { 348525ca84dSDennis Zhou (Facebook) *bit_off = pcpu_block_off_to_off(i, 349525ca84dSDennis Zhou (Facebook) block->contig_hint_start); 350525ca84dSDennis Zhou (Facebook) return; 351525ca84dSDennis Zhou (Facebook) } 352525ca84dSDennis Zhou (Facebook) 353525ca84dSDennis Zhou (Facebook) *bits = block->right_free; 354525ca84dSDennis Zhou (Facebook) *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; 355525ca84dSDennis Zhou (Facebook) } 356525ca84dSDennis Zhou (Facebook) } 357525ca84dSDennis Zhou (Facebook) 358*b4c2116cSDennis Zhou (Facebook) /** 359*b4c2116cSDennis Zhou (Facebook) * pcpu_next_fit_region - finds fit areas for a given allocation request 360*b4c2116cSDennis Zhou (Facebook) * @chunk: chunk of interest 361*b4c2116cSDennis Zhou (Facebook) * @alloc_bits: size of allocation 362*b4c2116cSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 363*b4c2116cSDennis Zhou (Facebook) * @bit_off: chunk offset 364*b4c2116cSDennis Zhou (Facebook) * @bits: size of free area 365*b4c2116cSDennis Zhou (Facebook) * 366*b4c2116cSDennis Zhou (Facebook) * Finds the next free region that is viable for use with a given size and 367*b4c2116cSDennis Zhou (Facebook) * alignment. This only returns if there is a valid area to be used for this 368*b4c2116cSDennis Zhou (Facebook) * allocation. block->first_free is returned if the allocation request fits 369*b4c2116cSDennis Zhou (Facebook) * within the block to see if the request can be fulfilled prior to the contig 370*b4c2116cSDennis Zhou (Facebook) * hint. 
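 *
 * As a hypothetical walk-through (values chosen only for illustration):
 * with @align of 1, @alloc_bits of 8, and a block whose first_free is 4
 * and whose contig_hint is 32 bits starting at offset 40, the hint can
 * hold the request, so @bit_off is set to the block's first_free (bit 4
 * within the block) and @bits to 8 + 40 - 4 == 44, letting the caller
 * try to place the allocation ahead of the hint while the hint itself
 * still guarantees a fit.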
371*b4c2116cSDennis Zhou (Facebook) */ 372*b4c2116cSDennis Zhou (Facebook) static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, 373*b4c2116cSDennis Zhou (Facebook) int align, int *bit_off, int *bits) 374*b4c2116cSDennis Zhou (Facebook) { 375*b4c2116cSDennis Zhou (Facebook) int i = pcpu_off_to_block_index(*bit_off); 376*b4c2116cSDennis Zhou (Facebook) int block_off = pcpu_off_to_block_off(*bit_off); 377*b4c2116cSDennis Zhou (Facebook) struct pcpu_block_md *block; 378*b4c2116cSDennis Zhou (Facebook) 379*b4c2116cSDennis Zhou (Facebook) *bits = 0; 380*b4c2116cSDennis Zhou (Facebook) for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); 381*b4c2116cSDennis Zhou (Facebook) block++, i++) { 382*b4c2116cSDennis Zhou (Facebook) /* handles contig area across blocks */ 383*b4c2116cSDennis Zhou (Facebook) if (*bits) { 384*b4c2116cSDennis Zhou (Facebook) *bits += block->left_free; 385*b4c2116cSDennis Zhou (Facebook) if (*bits >= alloc_bits) 386*b4c2116cSDennis Zhou (Facebook) return; 387*b4c2116cSDennis Zhou (Facebook) if (block->left_free == PCPU_BITMAP_BLOCK_BITS) 388*b4c2116cSDennis Zhou (Facebook) continue; 389*b4c2116cSDennis Zhou (Facebook) } 390*b4c2116cSDennis Zhou (Facebook) 391*b4c2116cSDennis Zhou (Facebook) /* check block->contig_hint */ 392*b4c2116cSDennis Zhou (Facebook) *bits = ALIGN(block->contig_hint_start, align) - 393*b4c2116cSDennis Zhou (Facebook) block->contig_hint_start; 394*b4c2116cSDennis Zhou (Facebook) /* 395*b4c2116cSDennis Zhou (Facebook) * This uses the block offset to determine if this has been 396*b4c2116cSDennis Zhou (Facebook) * checked in the prior iteration. 397*b4c2116cSDennis Zhou (Facebook) */ 398*b4c2116cSDennis Zhou (Facebook) if (block->contig_hint && 399*b4c2116cSDennis Zhou (Facebook) block->contig_hint_start >= block_off && 400*b4c2116cSDennis Zhou (Facebook) block->contig_hint >= *bits + alloc_bits) { 401*b4c2116cSDennis Zhou (Facebook) *bits += alloc_bits + block->contig_hint_start - 402*b4c2116cSDennis Zhou (Facebook) block->first_free; 403*b4c2116cSDennis Zhou (Facebook) *bit_off = pcpu_block_off_to_off(i, block->first_free); 404*b4c2116cSDennis Zhou (Facebook) return; 405*b4c2116cSDennis Zhou (Facebook) } 406*b4c2116cSDennis Zhou (Facebook) 407*b4c2116cSDennis Zhou (Facebook) *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, 408*b4c2116cSDennis Zhou (Facebook) align); 409*b4c2116cSDennis Zhou (Facebook) *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off; 410*b4c2116cSDennis Zhou (Facebook) *bit_off = pcpu_block_off_to_off(i, *bit_off); 411*b4c2116cSDennis Zhou (Facebook) if (*bits >= alloc_bits) 412*b4c2116cSDennis Zhou (Facebook) return; 413*b4c2116cSDennis Zhou (Facebook) } 414*b4c2116cSDennis Zhou (Facebook) 415*b4c2116cSDennis Zhou (Facebook) /* no valid offsets were found - fail condition */ 416*b4c2116cSDennis Zhou (Facebook) *bit_off = pcpu_chunk_map_bits(chunk); 417*b4c2116cSDennis Zhou (Facebook) } 418*b4c2116cSDennis Zhou (Facebook) 419525ca84dSDennis Zhou (Facebook) /* 420525ca84dSDennis Zhou (Facebook) * Metadata free area iterators. These perform aggregation of free areas 421525ca84dSDennis Zhou (Facebook) * based on the metadata blocks and return the offset @bit_off and size in 422*b4c2116cSDennis Zhou (Facebook) * bits of the free area @bits. pcpu_for_each_fit_region only returns when 423*b4c2116cSDennis Zhou (Facebook) * a fit is found for the allocation request. 
424525ca84dSDennis Zhou (Facebook) */ 425525ca84dSDennis Zhou (Facebook) #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ 426525ca84dSDennis Zhou (Facebook) for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \ 427525ca84dSDennis Zhou (Facebook) (bit_off) < pcpu_chunk_map_bits((chunk)); \ 428525ca84dSDennis Zhou (Facebook) (bit_off) += (bits) + 1, \ 429525ca84dSDennis Zhou (Facebook) pcpu_next_md_free_region((chunk), &(bit_off), &(bits))) 430525ca84dSDennis Zhou (Facebook) 431*b4c2116cSDennis Zhou (Facebook) #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ 432*b4c2116cSDennis Zhou (Facebook) for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ 433*b4c2116cSDennis Zhou (Facebook) &(bits)); \ 434*b4c2116cSDennis Zhou (Facebook) (bit_off) < pcpu_chunk_map_bits((chunk)); \ 435*b4c2116cSDennis Zhou (Facebook) (bit_off) += (bits), \ 436*b4c2116cSDennis Zhou (Facebook) pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ 437*b4c2116cSDennis Zhou (Facebook) &(bits))) 438*b4c2116cSDennis Zhou (Facebook) 439525ca84dSDennis Zhou (Facebook) /** 44090459ce0SBob Liu * pcpu_mem_zalloc - allocate memory 4411880d93bSTejun Heo * @size: bytes to allocate 442fbf59bc9STejun Heo * 4431880d93bSTejun Heo * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 44490459ce0SBob Liu * kzalloc() is used; otherwise, vzalloc() is used. The returned 4451880d93bSTejun Heo * memory is always zeroed. 446fbf59bc9STejun Heo * 447ccea34b5STejun Heo * CONTEXT: 448ccea34b5STejun Heo * Does GFP_KERNEL allocation. 449ccea34b5STejun Heo * 450fbf59bc9STejun Heo * RETURNS: 4511880d93bSTejun Heo * Pointer to the allocated area on success, NULL on failure. 452fbf59bc9STejun Heo */ 45390459ce0SBob Liu static void *pcpu_mem_zalloc(size_t size) 454fbf59bc9STejun Heo { 455099a19d9STejun Heo if (WARN_ON_ONCE(!slab_is_available())) 456099a19d9STejun Heo return NULL; 457099a19d9STejun Heo 458fbf59bc9STejun Heo if (size <= PAGE_SIZE) 4591880d93bSTejun Heo return kzalloc(size, GFP_KERNEL); 4607af4c093SJesper Juhl else 4617af4c093SJesper Juhl return vzalloc(size); 4621880d93bSTejun Heo } 463fbf59bc9STejun Heo 4641880d93bSTejun Heo /** 4651880d93bSTejun Heo * pcpu_mem_free - free memory 4661880d93bSTejun Heo * @ptr: memory to free 4671880d93bSTejun Heo * 46890459ce0SBob Liu * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). 4691880d93bSTejun Heo */ 4701d5cfdb0STetsuo Handa static void pcpu_mem_free(void *ptr) 4711880d93bSTejun Heo { 4721d5cfdb0STetsuo Handa kvfree(ptr); 473fbf59bc9STejun Heo } 474fbf59bc9STejun Heo 475fbf59bc9STejun Heo /** 476fbf59bc9STejun Heo * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 477fbf59bc9STejun Heo * @chunk: chunk of interest 478fbf59bc9STejun Heo * @oslot: the previous slot it was on 479fbf59bc9STejun Heo * 480fbf59bc9STejun Heo * This function is called after an allocation or free changed @chunk. 481fbf59bc9STejun Heo * New slot according to the changed state is determined and @chunk is 482edcb4639STejun Heo * moved to the slot. Note that the reserved chunk is never put on 483edcb4639STejun Heo * chunk slots. 484ccea34b5STejun Heo * 485ccea34b5STejun Heo * CONTEXT: 486ccea34b5STejun Heo * pcpu_lock. 
487fbf59bc9STejun Heo */ 488fbf59bc9STejun Heo static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 489fbf59bc9STejun Heo { 490fbf59bc9STejun Heo int nslot = pcpu_chunk_slot(chunk); 491fbf59bc9STejun Heo 492edcb4639STejun Heo if (chunk != pcpu_reserved_chunk && oslot != nslot) { 493fbf59bc9STejun Heo if (oslot < nslot) 494fbf59bc9STejun Heo list_move(&chunk->list, &pcpu_slot[nslot]); 495fbf59bc9STejun Heo else 496fbf59bc9STejun Heo list_move_tail(&chunk->list, &pcpu_slot[nslot]); 497fbf59bc9STejun Heo } 498fbf59bc9STejun Heo } 499fbf59bc9STejun Heo 500fbf59bc9STejun Heo /** 50140064aecSDennis Zhou (Facebook) * pcpu_cnt_pop_pages- counts populated backing pages in range 502833af842STejun Heo * @chunk: chunk of interest 50340064aecSDennis Zhou (Facebook) * @bit_off: start offset 50440064aecSDennis Zhou (Facebook) * @bits: size of area to check 5059f7dcf22STejun Heo * 50640064aecSDennis Zhou (Facebook) * Calculates the number of populated pages in the region 50740064aecSDennis Zhou (Facebook) * [page_start, page_end). This keeps track of how many empty populated 50840064aecSDennis Zhou (Facebook) * pages are available and decide if async work should be scheduled. 509ccea34b5STejun Heo * 5109f7dcf22STejun Heo * RETURNS: 51140064aecSDennis Zhou (Facebook) * The nr of populated pages. 5129f7dcf22STejun Heo */ 51340064aecSDennis Zhou (Facebook) static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off, 51440064aecSDennis Zhou (Facebook) int bits) 5159f7dcf22STejun Heo { 51640064aecSDennis Zhou (Facebook) int page_start = PFN_UP(bit_off * PCPU_MIN_ALLOC_SIZE); 51740064aecSDennis Zhou (Facebook) int page_end = PFN_DOWN((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 51840064aecSDennis Zhou (Facebook) 51940064aecSDennis Zhou (Facebook) if (page_start >= page_end) 52040064aecSDennis Zhou (Facebook) return 0; 52140064aecSDennis Zhou (Facebook) 52240064aecSDennis Zhou (Facebook) /* 52340064aecSDennis Zhou (Facebook) * bitmap_weight counts the number of bits set in a bitmap up to 52440064aecSDennis Zhou (Facebook) * the specified number of bits. This is counting the populated 52540064aecSDennis Zhou (Facebook) * pages up to page_end and then subtracting the populated pages 52640064aecSDennis Zhou (Facebook) * up to page_start to count the populated pages in 52740064aecSDennis Zhou (Facebook) * [page_start, page_end). 52840064aecSDennis Zhou (Facebook) */ 52940064aecSDennis Zhou (Facebook) return bitmap_weight(chunk->populated, page_end) - 53040064aecSDennis Zhou (Facebook) bitmap_weight(chunk->populated, page_start); 53140064aecSDennis Zhou (Facebook) } 53240064aecSDennis Zhou (Facebook) 53340064aecSDennis Zhou (Facebook) /** 53440064aecSDennis Zhou (Facebook) * pcpu_chunk_update - updates the chunk metadata given a free area 53540064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 53640064aecSDennis Zhou (Facebook) * @bit_off: chunk offset 53740064aecSDennis Zhou (Facebook) * @bits: size of free area 53840064aecSDennis Zhou (Facebook) * 53913f96637SDennis Zhou (Facebook) * This updates the chunk's contig hint and starting offset given a free area. 540268625a6SDennis Zhou (Facebook) * Choose the best starting offset if the contig hint is equal. 
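 *
 * For example (illustrative values only): if a newly seen free area ties
 * the current contig_bits, the better aligned start wins, so a start of
 * bit 512 (__ffs == 9) replaces a remembered start of bit 768
 * (__ffs == 8), and a start of bit 0 always replaces a non-zero one.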
54140064aecSDennis Zhou (Facebook) */ 54240064aecSDennis Zhou (Facebook) static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits) 54340064aecSDennis Zhou (Facebook) { 54413f96637SDennis Zhou (Facebook) if (bits > chunk->contig_bits) { 54513f96637SDennis Zhou (Facebook) chunk->contig_bits_start = bit_off; 54640064aecSDennis Zhou (Facebook) chunk->contig_bits = bits; 547268625a6SDennis Zhou (Facebook) } else if (bits == chunk->contig_bits && chunk->contig_bits_start && 548268625a6SDennis Zhou (Facebook) (!bit_off || 549268625a6SDennis Zhou (Facebook) __ffs(bit_off) > __ffs(chunk->contig_bits_start))) { 550268625a6SDennis Zhou (Facebook) /* use the start with the best alignment */ 551268625a6SDennis Zhou (Facebook) chunk->contig_bits_start = bit_off; 55240064aecSDennis Zhou (Facebook) } 55313f96637SDennis Zhou (Facebook) } 55440064aecSDennis Zhou (Facebook) 55540064aecSDennis Zhou (Facebook) /** 55640064aecSDennis Zhou (Facebook) * pcpu_chunk_refresh_hint - updates metadata about a chunk 55740064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 55840064aecSDennis Zhou (Facebook) * 559525ca84dSDennis Zhou (Facebook) * Iterates over the metadata blocks to find the largest contig area. 560525ca84dSDennis Zhou (Facebook) * It also counts the populated pages and uses the delta to update the 561525ca84dSDennis Zhou (Facebook) * global count. 56240064aecSDennis Zhou (Facebook) * 56340064aecSDennis Zhou (Facebook) * Updates: 56440064aecSDennis Zhou (Facebook) * chunk->contig_bits 56513f96637SDennis Zhou (Facebook) * chunk->contig_bits_start 566525ca84dSDennis Zhou (Facebook) * nr_empty_pop_pages (chunk and global) 56740064aecSDennis Zhou (Facebook) */ 56840064aecSDennis Zhou (Facebook) static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk) 56940064aecSDennis Zhou (Facebook) { 570525ca84dSDennis Zhou (Facebook) int bit_off, bits, nr_empty_pop_pages; 57140064aecSDennis Zhou (Facebook) 57240064aecSDennis Zhou (Facebook) /* clear metadata */ 57340064aecSDennis Zhou (Facebook) chunk->contig_bits = 0; 57440064aecSDennis Zhou (Facebook) 575525ca84dSDennis Zhou (Facebook) bit_off = chunk->first_bit; 57640064aecSDennis Zhou (Facebook) bits = nr_empty_pop_pages = 0; 577525ca84dSDennis Zhou (Facebook) pcpu_for_each_md_free_region(chunk, bit_off, bits) { 578525ca84dSDennis Zhou (Facebook) pcpu_chunk_update(chunk, bit_off, bits); 57940064aecSDennis Zhou (Facebook) 580525ca84dSDennis Zhou (Facebook) nr_empty_pop_pages += pcpu_cnt_pop_pages(chunk, bit_off, bits); 58140064aecSDennis Zhou (Facebook) } 58240064aecSDennis Zhou (Facebook) 58340064aecSDennis Zhou (Facebook) /* 58440064aecSDennis Zhou (Facebook) * Keep track of nr_empty_pop_pages. 58540064aecSDennis Zhou (Facebook) * 58640064aecSDennis Zhou (Facebook) * The chunk maintains the previous number of free pages it held, 58740064aecSDennis Zhou (Facebook) * so the delta is used to update the global counter. The reserved 58840064aecSDennis Zhou (Facebook) * chunk is not part of the free page count as they are populated 58940064aecSDennis Zhou (Facebook) * at init and are special to serving reserved allocations. 
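 *
 * For instance (numbers purely illustrative), if this chunk previously
 * reported 3 empty populated pages and the rescan now finds 1,
 * pcpu_nr_empty_pop_pages is decremented by 2; the reserved chunk never
 * adjusts the global counter.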
59040064aecSDennis Zhou (Facebook) */ 59140064aecSDennis Zhou (Facebook) if (chunk != pcpu_reserved_chunk) 59240064aecSDennis Zhou (Facebook) pcpu_nr_empty_pop_pages += 59340064aecSDennis Zhou (Facebook) (nr_empty_pop_pages - chunk->nr_empty_pop_pages); 59440064aecSDennis Zhou (Facebook) 59540064aecSDennis Zhou (Facebook) chunk->nr_empty_pop_pages = nr_empty_pop_pages; 59640064aecSDennis Zhou (Facebook) } 59740064aecSDennis Zhou (Facebook) 59840064aecSDennis Zhou (Facebook) /** 599ca460b3cSDennis Zhou (Facebook) * pcpu_block_update - updates a block given a free area 600ca460b3cSDennis Zhou (Facebook) * @block: block of interest 601ca460b3cSDennis Zhou (Facebook) * @start: start offset in block 602ca460b3cSDennis Zhou (Facebook) * @end: end offset in block 603ca460b3cSDennis Zhou (Facebook) * 604ca460b3cSDennis Zhou (Facebook) * Updates a block given a known free area. The region [start, end) is 605268625a6SDennis Zhou (Facebook) * expected to be the entirety of the free area within a block. Chooses 606268625a6SDennis Zhou (Facebook) * the best starting offset if the contig hints are equal. 607ca460b3cSDennis Zhou (Facebook) */ 608ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) 609ca460b3cSDennis Zhou (Facebook) { 610ca460b3cSDennis Zhou (Facebook) int contig = end - start; 611ca460b3cSDennis Zhou (Facebook) 612ca460b3cSDennis Zhou (Facebook) block->first_free = min(block->first_free, start); 613ca460b3cSDennis Zhou (Facebook) if (start == 0) 614ca460b3cSDennis Zhou (Facebook) block->left_free = contig; 615ca460b3cSDennis Zhou (Facebook) 616ca460b3cSDennis Zhou (Facebook) if (end == PCPU_BITMAP_BLOCK_BITS) 617ca460b3cSDennis Zhou (Facebook) block->right_free = contig; 618ca460b3cSDennis Zhou (Facebook) 619ca460b3cSDennis Zhou (Facebook) if (contig > block->contig_hint) { 620ca460b3cSDennis Zhou (Facebook) block->contig_hint_start = start; 621ca460b3cSDennis Zhou (Facebook) block->contig_hint = contig; 622268625a6SDennis Zhou (Facebook) } else if (block->contig_hint_start && contig == block->contig_hint && 623268625a6SDennis Zhou (Facebook) (!start || __ffs(start) > __ffs(block->contig_hint_start))) { 624268625a6SDennis Zhou (Facebook) /* use the start with the best alignment */ 625268625a6SDennis Zhou (Facebook) block->contig_hint_start = start; 626ca460b3cSDennis Zhou (Facebook) } 627ca460b3cSDennis Zhou (Facebook) } 628ca460b3cSDennis Zhou (Facebook) 629ca460b3cSDennis Zhou (Facebook) /** 630ca460b3cSDennis Zhou (Facebook) * pcpu_block_refresh_hint 631ca460b3cSDennis Zhou (Facebook) * @chunk: chunk of interest 632ca460b3cSDennis Zhou (Facebook) * @index: index of the metadata block 633ca460b3cSDennis Zhou (Facebook) * 634ca460b3cSDennis Zhou (Facebook) * Scans over the block beginning at first_free and updates the block 635ca460b3cSDennis Zhou (Facebook) * metadata accordingly. 
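 *
 * As an illustrative example (assuming PCPU_BITMAP_BLOCK_BITS of 1024):
 * a block whose only free runs are [0, 10) and [900, 1024) ends up with
 * left_free == 10, right_free == 124, and contig_hint == 124 starting
 * at offset 900 after the rescan.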
636ca460b3cSDennis Zhou (Facebook) */ 637ca460b3cSDennis Zhou (Facebook) static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) 638ca460b3cSDennis Zhou (Facebook) { 639ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *block = chunk->md_blocks + index; 640ca460b3cSDennis Zhou (Facebook) unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); 641ca460b3cSDennis Zhou (Facebook) int rs, re; /* region start, region end */ 642ca460b3cSDennis Zhou (Facebook) 643ca460b3cSDennis Zhou (Facebook) /* clear hints */ 644ca460b3cSDennis Zhou (Facebook) block->contig_hint = 0; 645ca460b3cSDennis Zhou (Facebook) block->left_free = block->right_free = 0; 646ca460b3cSDennis Zhou (Facebook) 647ca460b3cSDennis Zhou (Facebook) /* iterate over free areas and update the contig hints */ 648ca460b3cSDennis Zhou (Facebook) pcpu_for_each_unpop_region(alloc_map, rs, re, block->first_free, 649ca460b3cSDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS) { 650ca460b3cSDennis Zhou (Facebook) pcpu_block_update(block, rs, re); 651ca460b3cSDennis Zhou (Facebook) } 652ca460b3cSDennis Zhou (Facebook) } 653ca460b3cSDennis Zhou (Facebook) 654ca460b3cSDennis Zhou (Facebook) /** 655ca460b3cSDennis Zhou (Facebook) * pcpu_block_update_hint_alloc - update hint on allocation path 656ca460b3cSDennis Zhou (Facebook) * @chunk: chunk of interest 657ca460b3cSDennis Zhou (Facebook) * @bit_off: chunk offset 658ca460b3cSDennis Zhou (Facebook) * @bits: size of request 659fc304334SDennis Zhou (Facebook) * 660fc304334SDennis Zhou (Facebook) * Updates metadata for the allocation path. The metadata only has to be 661fc304334SDennis Zhou (Facebook) * refreshed by a full scan iff the chunk's contig hint is broken. Block level 662fc304334SDennis Zhou (Facebook) * scans are required if the block's contig hint is broken. 663ca460b3cSDennis Zhou (Facebook) */ 664ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, 665ca460b3cSDennis Zhou (Facebook) int bits) 666ca460b3cSDennis Zhou (Facebook) { 667ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *s_block, *e_block, *block; 668ca460b3cSDennis Zhou (Facebook) int s_index, e_index; /* block indexes of the freed allocation */ 669ca460b3cSDennis Zhou (Facebook) int s_off, e_off; /* block offsets of the freed allocation */ 670ca460b3cSDennis Zhou (Facebook) 671ca460b3cSDennis Zhou (Facebook) /* 672ca460b3cSDennis Zhou (Facebook) * Calculate per block offsets. 673ca460b3cSDennis Zhou (Facebook) * The calculation uses an inclusive range, but the resulting offsets 674ca460b3cSDennis Zhou (Facebook) * are [start, end). e_index always points to the last block in the 675ca460b3cSDennis Zhou (Facebook) * range. 676ca460b3cSDennis Zhou (Facebook) */ 677ca460b3cSDennis Zhou (Facebook) s_index = pcpu_off_to_block_index(bit_off); 678ca460b3cSDennis Zhou (Facebook) e_index = pcpu_off_to_block_index(bit_off + bits - 1); 679ca460b3cSDennis Zhou (Facebook) s_off = pcpu_off_to_block_off(bit_off); 680ca460b3cSDennis Zhou (Facebook) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 681ca460b3cSDennis Zhou (Facebook) 682ca460b3cSDennis Zhou (Facebook) s_block = chunk->md_blocks + s_index; 683ca460b3cSDennis Zhou (Facebook) e_block = chunk->md_blocks + e_index; 684ca460b3cSDennis Zhou (Facebook) 685ca460b3cSDennis Zhou (Facebook) /* 686ca460b3cSDennis Zhou (Facebook) * Update s_block. 687fc304334SDennis Zhou (Facebook) * block->first_free must be updated if the allocation takes its place. 
688fc304334SDennis Zhou (Facebook) * If the allocation breaks the contig_hint, a scan is required to 689fc304334SDennis Zhou (Facebook) * restore this hint. 690ca460b3cSDennis Zhou (Facebook) */ 691fc304334SDennis Zhou (Facebook) if (s_off == s_block->first_free) 692fc304334SDennis Zhou (Facebook) s_block->first_free = find_next_zero_bit( 693fc304334SDennis Zhou (Facebook) pcpu_index_alloc_map(chunk, s_index), 694fc304334SDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS, 695fc304334SDennis Zhou (Facebook) s_off + bits); 696fc304334SDennis Zhou (Facebook) 697fc304334SDennis Zhou (Facebook) if (s_off >= s_block->contig_hint_start && 698fc304334SDennis Zhou (Facebook) s_off < s_block->contig_hint_start + s_block->contig_hint) { 699fc304334SDennis Zhou (Facebook) /* block contig hint is broken - scan to fix it */ 700ca460b3cSDennis Zhou (Facebook) pcpu_block_refresh_hint(chunk, s_index); 701fc304334SDennis Zhou (Facebook) } else { 702fc304334SDennis Zhou (Facebook) /* update left and right contig manually */ 703fc304334SDennis Zhou (Facebook) s_block->left_free = min(s_block->left_free, s_off); 704fc304334SDennis Zhou (Facebook) if (s_index == e_index) 705fc304334SDennis Zhou (Facebook) s_block->right_free = min_t(int, s_block->right_free, 706fc304334SDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS - e_off); 707fc304334SDennis Zhou (Facebook) else 708fc304334SDennis Zhou (Facebook) s_block->right_free = 0; 709fc304334SDennis Zhou (Facebook) } 710ca460b3cSDennis Zhou (Facebook) 711ca460b3cSDennis Zhou (Facebook) /* 712ca460b3cSDennis Zhou (Facebook) * Update e_block. 713ca460b3cSDennis Zhou (Facebook) */ 714ca460b3cSDennis Zhou (Facebook) if (s_index != e_index) { 715fc304334SDennis Zhou (Facebook) /* 716fc304334SDennis Zhou (Facebook) * When the allocation is across blocks, the end is along 717fc304334SDennis Zhou (Facebook) * the left part of the e_block. 
718fc304334SDennis Zhou (Facebook) 		 */
719fc304334SDennis Zhou (Facebook) 		e_block->first_free = find_next_zero_bit(
720fc304334SDennis Zhou (Facebook) 				pcpu_index_alloc_map(chunk, e_index),
721fc304334SDennis Zhou (Facebook) 				PCPU_BITMAP_BLOCK_BITS, e_off);
722fc304334SDennis Zhou (Facebook) 
723fc304334SDennis Zhou (Facebook) 		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
724fc304334SDennis Zhou (Facebook) 			/* reset the block */
725fc304334SDennis Zhou (Facebook) 			e_block++;
726fc304334SDennis Zhou (Facebook) 		} else {
727fc304334SDennis Zhou (Facebook) 			if (e_off > e_block->contig_hint_start) {
728fc304334SDennis Zhou (Facebook) 				/* contig hint is broken - scan to fix it */
729ca460b3cSDennis Zhou (Facebook) 				pcpu_block_refresh_hint(chunk, e_index);
730fc304334SDennis Zhou (Facebook) 			} else {
731fc304334SDennis Zhou (Facebook) 				e_block->left_free = 0;
732fc304334SDennis Zhou (Facebook) 				e_block->right_free =
733fc304334SDennis Zhou (Facebook) 					min_t(int, e_block->right_free,
734fc304334SDennis Zhou (Facebook) 					      PCPU_BITMAP_BLOCK_BITS - e_off);
735fc304334SDennis Zhou (Facebook) 			}
736fc304334SDennis Zhou (Facebook) 		}
737ca460b3cSDennis Zhou (Facebook) 
738ca460b3cSDennis Zhou (Facebook) 		/* update in-between md_blocks */
739ca460b3cSDennis Zhou (Facebook) 		for (block = s_block + 1; block < e_block; block++) {
740ca460b3cSDennis Zhou (Facebook) 			block->contig_hint = 0;
741ca460b3cSDennis Zhou (Facebook) 			block->left_free = 0;
742ca460b3cSDennis Zhou (Facebook) 			block->right_free = 0;
743ca460b3cSDennis Zhou (Facebook) 		}
744ca460b3cSDennis Zhou (Facebook) 	}
745ca460b3cSDennis Zhou (Facebook) 
746fc304334SDennis Zhou (Facebook) 	/*
747fc304334SDennis Zhou (Facebook) 	 * The only time a full chunk scan is required is if the chunk
748fc304334SDennis Zhou (Facebook) 	 * contig hint is broken. Otherwise, it means a smaller space
749fc304334SDennis Zhou (Facebook) 	 * was used and therefore the chunk contig hint is still correct.
750fc304334SDennis Zhou (Facebook) 	 */
751fc304334SDennis Zhou (Facebook) 	if (bit_off >= chunk->contig_bits_start &&
752fc304334SDennis Zhou (Facebook) 	    bit_off < chunk->contig_bits_start + chunk->contig_bits)
753ca460b3cSDennis Zhou (Facebook) 		pcpu_chunk_refresh_hint(chunk);
754ca460b3cSDennis Zhou (Facebook) }
755ca460b3cSDennis Zhou (Facebook) 
756ca460b3cSDennis Zhou (Facebook) /**
757ca460b3cSDennis Zhou (Facebook)  * pcpu_block_update_hint_free - updates the block hints on the free path
758ca460b3cSDennis Zhou (Facebook)  * @chunk: chunk of interest
759ca460b3cSDennis Zhou (Facebook)  * @bit_off: chunk offset
760ca460b3cSDennis Zhou (Facebook)  * @bits: size of request
761b185cd0dSDennis Zhou (Facebook)  *
762b185cd0dSDennis Zhou (Facebook)  * Updates metadata for the free path. This avoids a blind block
763b185cd0dSDennis Zhou (Facebook)  * refresh by making use of the block contig hints. If this fails, it scans
764b185cd0dSDennis Zhou (Facebook)  * forward and backward to determine the extent of the free area. This is
765b185cd0dSDennis Zhou (Facebook)  * capped at the boundary of blocks.
766b185cd0dSDennis Zhou (Facebook)  *
767b185cd0dSDennis Zhou (Facebook)  * A chunk update is triggered if a page becomes free, a block becomes free,
768b185cd0dSDennis Zhou (Facebook)  * or the free spans across blocks. This tradeoff is to minimize iterating
769b185cd0dSDennis Zhou (Facebook)  * over the block metadata to update chunk->contig_bits. chunk->contig_bits
770b185cd0dSDennis Zhou (Facebook)  * may be off by up to a page, but it will never be more than the available
771b185cd0dSDennis Zhou (Facebook)  * space.
If the contig hint is contained in one block, it will be accurate. 772ca460b3cSDennis Zhou (Facebook) */ 773ca460b3cSDennis Zhou (Facebook) static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, 774ca460b3cSDennis Zhou (Facebook) int bits) 775ca460b3cSDennis Zhou (Facebook) { 776ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *s_block, *e_block, *block; 777ca460b3cSDennis Zhou (Facebook) int s_index, e_index; /* block indexes of the freed allocation */ 778ca460b3cSDennis Zhou (Facebook) int s_off, e_off; /* block offsets of the freed allocation */ 779b185cd0dSDennis Zhou (Facebook) int start, end; /* start and end of the whole free area */ 780ca460b3cSDennis Zhou (Facebook) 781ca460b3cSDennis Zhou (Facebook) /* 782ca460b3cSDennis Zhou (Facebook) * Calculate per block offsets. 783ca460b3cSDennis Zhou (Facebook) * The calculation uses an inclusive range, but the resulting offsets 784ca460b3cSDennis Zhou (Facebook) * are [start, end). e_index always points to the last block in the 785ca460b3cSDennis Zhou (Facebook) * range. 786ca460b3cSDennis Zhou (Facebook) */ 787ca460b3cSDennis Zhou (Facebook) s_index = pcpu_off_to_block_index(bit_off); 788ca460b3cSDennis Zhou (Facebook) e_index = pcpu_off_to_block_index(bit_off + bits - 1); 789ca460b3cSDennis Zhou (Facebook) s_off = pcpu_off_to_block_off(bit_off); 790ca460b3cSDennis Zhou (Facebook) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 791ca460b3cSDennis Zhou (Facebook) 792ca460b3cSDennis Zhou (Facebook) s_block = chunk->md_blocks + s_index; 793ca460b3cSDennis Zhou (Facebook) e_block = chunk->md_blocks + e_index; 794ca460b3cSDennis Zhou (Facebook) 795b185cd0dSDennis Zhou (Facebook) /* 796b185cd0dSDennis Zhou (Facebook) * Check if the freed area aligns with the block->contig_hint. 797b185cd0dSDennis Zhou (Facebook) * If it does, then the scan to find the beginning/end of the 798b185cd0dSDennis Zhou (Facebook) * larger free area can be avoided. 799b185cd0dSDennis Zhou (Facebook) * 800b185cd0dSDennis Zhou (Facebook) * start and end refer to beginning and end of the free area 801b185cd0dSDennis Zhou (Facebook) * within each their respective blocks. This is not necessarily 802b185cd0dSDennis Zhou (Facebook) * the entire free area as it may span blocks past the beginning 803b185cd0dSDennis Zhou (Facebook) * or end of the block. 804b185cd0dSDennis Zhou (Facebook) */ 805b185cd0dSDennis Zhou (Facebook) start = s_off; 806b185cd0dSDennis Zhou (Facebook) if (s_off == s_block->contig_hint + s_block->contig_hint_start) { 807b185cd0dSDennis Zhou (Facebook) start = s_block->contig_hint_start; 808b185cd0dSDennis Zhou (Facebook) } else { 809b185cd0dSDennis Zhou (Facebook) /* 810b185cd0dSDennis Zhou (Facebook) * Scan backwards to find the extent of the free area. 811b185cd0dSDennis Zhou (Facebook) * find_last_bit returns the starting bit, so if the start bit 812b185cd0dSDennis Zhou (Facebook) * is returned, that means there was no last bit and the 813b185cd0dSDennis Zhou (Facebook) * remainder of the chunk is free. 814b185cd0dSDennis Zhou (Facebook) */ 815b185cd0dSDennis Zhou (Facebook) int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), 816b185cd0dSDennis Zhou (Facebook) start); 817b185cd0dSDennis Zhou (Facebook) start = (start == l_bit) ? 
0 : l_bit + 1; 818b185cd0dSDennis Zhou (Facebook) } 819b185cd0dSDennis Zhou (Facebook) 820b185cd0dSDennis Zhou (Facebook) end = e_off; 821b185cd0dSDennis Zhou (Facebook) if (e_off == e_block->contig_hint_start) 822b185cd0dSDennis Zhou (Facebook) end = e_block->contig_hint_start + e_block->contig_hint; 823b185cd0dSDennis Zhou (Facebook) else 824b185cd0dSDennis Zhou (Facebook) end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), 825b185cd0dSDennis Zhou (Facebook) PCPU_BITMAP_BLOCK_BITS, end); 826b185cd0dSDennis Zhou (Facebook) 827ca460b3cSDennis Zhou (Facebook) /* update s_block */ 828b185cd0dSDennis Zhou (Facebook) e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS; 829b185cd0dSDennis Zhou (Facebook) pcpu_block_update(s_block, start, e_off); 830ca460b3cSDennis Zhou (Facebook) 831ca460b3cSDennis Zhou (Facebook) /* freeing in the same block */ 832ca460b3cSDennis Zhou (Facebook) if (s_index != e_index) { 833ca460b3cSDennis Zhou (Facebook) /* update e_block */ 834b185cd0dSDennis Zhou (Facebook) pcpu_block_update(e_block, 0, end); 835ca460b3cSDennis Zhou (Facebook) 836ca460b3cSDennis Zhou (Facebook) /* reset md_blocks in the middle */ 837ca460b3cSDennis Zhou (Facebook) for (block = s_block + 1; block < e_block; block++) { 838ca460b3cSDennis Zhou (Facebook) block->first_free = 0; 839ca460b3cSDennis Zhou (Facebook) block->contig_hint_start = 0; 840ca460b3cSDennis Zhou (Facebook) block->contig_hint = PCPU_BITMAP_BLOCK_BITS; 841ca460b3cSDennis Zhou (Facebook) block->left_free = PCPU_BITMAP_BLOCK_BITS; 842ca460b3cSDennis Zhou (Facebook) block->right_free = PCPU_BITMAP_BLOCK_BITS; 843ca460b3cSDennis Zhou (Facebook) } 844ca460b3cSDennis Zhou (Facebook) } 845ca460b3cSDennis Zhou (Facebook) 846b185cd0dSDennis Zhou (Facebook) /* 847b185cd0dSDennis Zhou (Facebook) * Refresh chunk metadata when the free makes a page free, a block 848b185cd0dSDennis Zhou (Facebook) * free, or spans across blocks. The contig hint may be off by up to 849b185cd0dSDennis Zhou (Facebook) * a page, but if the hint is contained in a block, it will be accurate 850b185cd0dSDennis Zhou (Facebook) * with the else condition below. 851b185cd0dSDennis Zhou (Facebook) */ 852b185cd0dSDennis Zhou (Facebook) if ((ALIGN_DOWN(end, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS)) > 853b185cd0dSDennis Zhou (Facebook) ALIGN(start, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS))) || 854b185cd0dSDennis Zhou (Facebook) s_index != e_index) 855ca460b3cSDennis Zhou (Facebook) pcpu_chunk_refresh_hint(chunk); 856b185cd0dSDennis Zhou (Facebook) else 857b185cd0dSDennis Zhou (Facebook) pcpu_chunk_update(chunk, pcpu_block_off_to_off(s_index, start), 858b185cd0dSDennis Zhou (Facebook) s_block->contig_hint); 859ca460b3cSDennis Zhou (Facebook) } 860ca460b3cSDennis Zhou (Facebook) 861ca460b3cSDennis Zhou (Facebook) /** 86240064aecSDennis Zhou (Facebook) * pcpu_is_populated - determines if the region is populated 86340064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 86440064aecSDennis Zhou (Facebook) * @bit_off: chunk offset 86540064aecSDennis Zhou (Facebook) * @bits: size of area 86640064aecSDennis Zhou (Facebook) * @next_off: return value for the next offset to start searching 86740064aecSDennis Zhou (Facebook) * 86840064aecSDennis Zhou (Facebook) * For atomic allocations, check if the backing pages are populated. 86940064aecSDennis Zhou (Facebook) * 87040064aecSDennis Zhou (Facebook) * RETURNS: 87140064aecSDennis Zhou (Facebook) * Bool if the backing pages are populated. 
87240064aecSDennis Zhou (Facebook) * next_index is to skip over unpopulated blocks in pcpu_find_block_fit. 87340064aecSDennis Zhou (Facebook) */ 87440064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, 87540064aecSDennis Zhou (Facebook) int *next_off) 87640064aecSDennis Zhou (Facebook) { 87740064aecSDennis Zhou (Facebook) int page_start, page_end, rs, re; 87840064aecSDennis Zhou (Facebook) 87940064aecSDennis Zhou (Facebook) page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); 88040064aecSDennis Zhou (Facebook) page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 88140064aecSDennis Zhou (Facebook) 88240064aecSDennis Zhou (Facebook) rs = page_start; 88340064aecSDennis Zhou (Facebook) pcpu_next_unpop(chunk->populated, &rs, &re, page_end); 88440064aecSDennis Zhou (Facebook) if (rs >= page_end) 88540064aecSDennis Zhou (Facebook) return true; 88640064aecSDennis Zhou (Facebook) 88740064aecSDennis Zhou (Facebook) *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; 88840064aecSDennis Zhou (Facebook) return false; 88940064aecSDennis Zhou (Facebook) } 89040064aecSDennis Zhou (Facebook) 89140064aecSDennis Zhou (Facebook) /** 89240064aecSDennis Zhou (Facebook) * pcpu_find_block_fit - finds the block index to start searching 89340064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 89440064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 89540064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE bytes) 89640064aecSDennis Zhou (Facebook) * @pop_only: use populated regions only 89740064aecSDennis Zhou (Facebook) * 898*b4c2116cSDennis Zhou (Facebook) * Given a chunk and an allocation spec, find the offset to begin searching 899*b4c2116cSDennis Zhou (Facebook) * for a free region. This iterates over the bitmap metadata blocks to 900*b4c2116cSDennis Zhou (Facebook) * find an offset that will be guaranteed to fit the requirements. It is 901*b4c2116cSDennis Zhou (Facebook) * not quite first fit as if the allocation does not fit in the contig hint 902*b4c2116cSDennis Zhou (Facebook) * of a block or chunk, it is skipped. This errs on the side of caution 903*b4c2116cSDennis Zhou (Facebook) * to prevent excess iteration. Poor alignment can cause the allocator to 904*b4c2116cSDennis Zhou (Facebook) * skip over blocks and chunks that have valid free areas. 905*b4c2116cSDennis Zhou (Facebook) * 90640064aecSDennis Zhou (Facebook) * RETURNS: 90740064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching. 90840064aecSDennis Zhou (Facebook) * -1 if no offset is found. 90940064aecSDennis Zhou (Facebook) */ 91040064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 91140064aecSDennis Zhou (Facebook) size_t align, bool pop_only) 91240064aecSDennis Zhou (Facebook) { 913*b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off; 91440064aecSDennis Zhou (Facebook) 91513f96637SDennis Zhou (Facebook) /* 91613f96637SDennis Zhou (Facebook) * Check to see if the allocation can fit in the chunk's contig hint. 91713f96637SDennis Zhou (Facebook) * This is an optimization to prevent scanning by assuming if it 91813f96637SDennis Zhou (Facebook) * cannot fit in the global hint, there is memory pressure and creating 91913f96637SDennis Zhou (Facebook) * a new chunk would happen soon. 
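 *
 * As a numeric illustration (values are examples only): with
 * contig_bits_start of 100 and an alignment of 16, ALIGN(100, 16) - 100
 * leaves 12 unusable leading bits, so a 56-bit request is rejected even
 * though a 64-bit contig hint exists, since 12 + 56 > 64.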
92013f96637SDennis Zhou (Facebook) */ 92113f96637SDennis Zhou (Facebook) bit_off = ALIGN(chunk->contig_bits_start, align) - 92213f96637SDennis Zhou (Facebook) chunk->contig_bits_start; 92313f96637SDennis Zhou (Facebook) if (bit_off + alloc_bits > chunk->contig_bits) 92413f96637SDennis Zhou (Facebook) return -1; 92513f96637SDennis Zhou (Facebook) 926*b4c2116cSDennis Zhou (Facebook) bit_off = chunk->first_bit; 927*b4c2116cSDennis Zhou (Facebook) bits = 0; 928*b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 92940064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 930*b4c2116cSDennis Zhou (Facebook) &next_off)) 93140064aecSDennis Zhou (Facebook) break; 93240064aecSDennis Zhou (Facebook) 933*b4c2116cSDennis Zhou (Facebook) bit_off = next_off; 93440064aecSDennis Zhou (Facebook) bits = 0; 93540064aecSDennis Zhou (Facebook) } 93640064aecSDennis Zhou (Facebook) 93740064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk)) 93840064aecSDennis Zhou (Facebook) return -1; 93940064aecSDennis Zhou (Facebook) 94040064aecSDennis Zhou (Facebook) return bit_off; 94140064aecSDennis Zhou (Facebook) } 94240064aecSDennis Zhou (Facebook) 94340064aecSDennis Zhou (Facebook) /** 94440064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk 94540064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 94640064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 94740064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 94840064aecSDennis Zhou (Facebook) * @start: bit_off to start searching 94940064aecSDennis Zhou (Facebook) * 95040064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an 951*b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan 952*b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint, 953*b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the 954*b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and 955*b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid 956*b4c2116cSDennis Zhou (Facebook) * free area. 95740064aecSDennis Zhou (Facebook) * 95840064aecSDennis Zhou (Facebook) * RETURNS: 95940064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success. 96040064aecSDennis Zhou (Facebook) * -1 if no matching area is found. 96140064aecSDennis Zhou (Facebook) */ 96240064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 96340064aecSDennis Zhou (Facebook) size_t align, int start) 96440064aecSDennis Zhou (Facebook) { 96540064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? (align - 1) : 0; 96640064aecSDennis Zhou (Facebook) int bit_off, end, oslot; 9679f7dcf22STejun Heo 9684f996e23STejun Heo lockdep_assert_held(&pcpu_lock); 9694f996e23STejun Heo 97040064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 971833af842STejun Heo 972833af842STejun Heo /* 97340064aecSDennis Zhou (Facebook) * Search to find a fit. 
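	 * The window is bounded at @start + @alloc_bits + PCPU_BITMAP_BLOCK_BITS
	 * so that bitmap_find_next_zero_area() only walks the region that
	 * pcpu_find_block_fit() has already vetted rather than the rest of
	 * the chunk.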
974833af842STejun Heo */ 975*b4c2116cSDennis Zhou (Facebook) end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS; 97640064aecSDennis Zhou (Facebook) bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start, 97740064aecSDennis Zhou (Facebook) alloc_bits, align_mask); 97840064aecSDennis Zhou (Facebook) if (bit_off >= end) 979a16037c8STejun Heo return -1; 980a16037c8STejun Heo 98140064aecSDennis Zhou (Facebook) /* update alloc map */ 98240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 983a16037c8STejun Heo 98440064aecSDennis Zhou (Facebook) /* update boundary map */ 98540064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map); 98640064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 98740064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map); 988a16037c8STejun Heo 98940064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 99040064aecSDennis Zhou (Facebook) 99186b442fbSDennis Zhou (Facebook) /* update first free bit */ 99286b442fbSDennis Zhou (Facebook) if (bit_off == chunk->first_bit) 99386b442fbSDennis Zhou (Facebook) chunk->first_bit = find_next_zero_bit( 99486b442fbSDennis Zhou (Facebook) chunk->alloc_map, 99586b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk), 99686b442fbSDennis Zhou (Facebook) bit_off + alloc_bits); 99786b442fbSDennis Zhou (Facebook) 998ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 99940064aecSDennis Zhou (Facebook) 100040064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot); 100140064aecSDennis Zhou (Facebook) 100240064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE; 1003a16037c8STejun Heo } 1004a16037c8STejun Heo 1005a16037c8STejun Heo /** 100640064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset 1007fbf59bc9STejun Heo * @chunk: chunk of interest 100840064aecSDennis Zhou (Facebook) * @off: addr offset into chunk 1009fbf59bc9STejun Heo * 101040064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using 101140064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map. 
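 *
 * As an example, with a 4 byte PCPU_MIN_ALLOC_SIZE, freeing @off == 32
 * starts at bit 8 of the allocation map; the freed size is the distance
 * from there to the next set bit in the boundary map, which
 * pcpu_alloc_area() set just past the end of that allocation.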
1012fbf59bc9STejun Heo  */
101340064aecSDennis Zhou (Facebook) static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
1014fbf59bc9STejun Heo {
101540064aecSDennis Zhou (Facebook) 	int bit_off, bits, end, oslot;
1016fbf59bc9STejun Heo 
10175ccd30e4SDennis Zhou 	lockdep_assert_held(&pcpu_lock);
101830a5b536SDennis Zhou 	pcpu_stats_area_dealloc(chunk);
10195ccd30e4SDennis Zhou 
102040064aecSDennis Zhou (Facebook) 	oslot = pcpu_chunk_slot(chunk);
1021723ad1d9SAl Viro 
102240064aecSDennis Zhou (Facebook) 	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1023fbf59bc9STejun Heo 
102440064aecSDennis Zhou (Facebook) 	/* find end index */
102540064aecSDennis Zhou (Facebook) 	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
102640064aecSDennis Zhou (Facebook) 			    bit_off + 1);
102740064aecSDennis Zhou (Facebook) 	bits = end - bit_off;
102840064aecSDennis Zhou (Facebook) 	bitmap_clear(chunk->alloc_map, bit_off, bits);
10293d331ad7SAl Viro 
103040064aecSDennis Zhou (Facebook) 	/* update metadata */
103140064aecSDennis Zhou (Facebook) 	chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
1032fbf59bc9STejun Heo 
103386b442fbSDennis Zhou (Facebook) 	/* update first free bit */
103486b442fbSDennis Zhou (Facebook) 	chunk->first_bit = min(chunk->first_bit, bit_off);
103586b442fbSDennis Zhou (Facebook) 
1036ca460b3cSDennis Zhou (Facebook) 	pcpu_block_update_hint_free(chunk, bit_off, bits);
1037b539b87fSTejun Heo 
1038fbf59bc9STejun Heo 	pcpu_chunk_relocate(chunk, oslot);
1039fbf59bc9STejun Heo }
1040fbf59bc9STejun Heo 
1041ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1042ca460b3cSDennis Zhou (Facebook) {
1043ca460b3cSDennis Zhou (Facebook) 	struct pcpu_block_md *md_block;
1044ca460b3cSDennis Zhou (Facebook) 
1045ca460b3cSDennis Zhou (Facebook) 	for (md_block = chunk->md_blocks;
1046ca460b3cSDennis Zhou (Facebook) 	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1047ca460b3cSDennis Zhou (Facebook) 	     md_block++) {
1048ca460b3cSDennis Zhou (Facebook) 		md_block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1049ca460b3cSDennis Zhou (Facebook) 		md_block->left_free = PCPU_BITMAP_BLOCK_BITS;
1050ca460b3cSDennis Zhou (Facebook) 		md_block->right_free = PCPU_BITMAP_BLOCK_BITS;
1051ca460b3cSDennis Zhou (Facebook) 	}
1052ca460b3cSDennis Zhou (Facebook) }
1053ca460b3cSDennis Zhou (Facebook) 
105440064aecSDennis Zhou (Facebook) /**
105540064aecSDennis Zhou (Facebook)  * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
105640064aecSDennis Zhou (Facebook)  * @tmp_addr: the start of the region served
105740064aecSDennis Zhou (Facebook)  * @map_size: size of the region served
105840064aecSDennis Zhou (Facebook)  *
105940064aecSDennis Zhou (Facebook)  * This is responsible for creating the chunks that serve the first chunk.  The
106040064aecSDennis Zhou (Facebook)  * base_addr is @tmp_addr rounded down to the nearest page boundary and the
106140064aecSDennis Zhou (Facebook)  * region end is rounded up.  Offsets are tracked to determine the region
106240064aecSDennis Zhou (Facebook)  * served.  All this is done to avoid partial blocks in the bitmap allocator.
106340064aecSDennis Zhou (Facebook)  *
106440064aecSDennis Zhou (Facebook)  * RETURNS:
106540064aecSDennis Zhou (Facebook)  * Chunk serving the region at @tmp_addr of @map_size.
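 *
 * As an illustration, if @tmp_addr falls 0x234 bytes into a page, base_addr
 * is @tmp_addr & PAGE_MASK, start_offset is 0x234, and region_size is
 * @map_size + 0x234 rounded up to the LCM of PAGE_SIZE and
 * PCPU_BITMAP_BLOCK_SIZE, with the excess accounted in end_offset.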
106640064aecSDennis Zhou (Facebook) */ 1067c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 106840064aecSDennis Zhou (Facebook) int map_size) 106910edf5b0SDennis Zhou (Facebook) { 107010edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 1071ca460b3cSDennis Zhou (Facebook) unsigned long aligned_addr, lcm_align; 107240064aecSDennis Zhou (Facebook) int start_offset, offset_bits, region_size, region_bits; 1073c0ebfdc3SDennis Zhou (Facebook) 1074c0ebfdc3SDennis Zhou (Facebook) /* region calculations */ 1075c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK; 1076c0ebfdc3SDennis Zhou (Facebook) 1077c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr; 10786b9d7c8eSDennis Zhou (Facebook) 1079ca460b3cSDennis Zhou (Facebook) /* 1080ca460b3cSDennis Zhou (Facebook) * Align the end of the region with the LCM of PAGE_SIZE and 1081ca460b3cSDennis Zhou (Facebook) * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of 1082ca460b3cSDennis Zhou (Facebook) * the other. 1083ca460b3cSDennis Zhou (Facebook) */ 1084ca460b3cSDennis Zhou (Facebook) lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); 1085ca460b3cSDennis Zhou (Facebook) region_size = ALIGN(start_offset + map_size, lcm_align); 108610edf5b0SDennis Zhou (Facebook) 1087c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */ 10888ab16c43SDennis Zhou (Facebook) chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) + 10898ab16c43SDennis Zhou (Facebook) BITS_TO_LONGS(region_size >> PAGE_SHIFT), 10908ab16c43SDennis Zhou (Facebook) 0); 1091c0ebfdc3SDennis Zhou (Facebook) 109210edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list); 1093c0ebfdc3SDennis Zhou (Facebook) 1094c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr; 109510edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset; 10966b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size; 1097c0ebfdc3SDennis Zhou (Facebook) 10988ab16c43SDennis Zhou (Facebook) chunk->nr_pages = region_size >> PAGE_SHIFT; 109940064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 1100c0ebfdc3SDennis Zhou (Facebook) 1101ca460b3cSDennis Zhou (Facebook) chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) * 110240064aecSDennis Zhou (Facebook) sizeof(chunk->alloc_map[0]), 0); 1103ca460b3cSDennis Zhou (Facebook) chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) * 110440064aecSDennis Zhou (Facebook) sizeof(chunk->bound_map[0]), 0); 1105ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) * 1106ca460b3cSDennis Zhou (Facebook) sizeof(chunk->md_blocks[0]), 0); 1107ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 110810edf5b0SDennis Zhou (Facebook) 110910edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 111010edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 11118ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages); 11128ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages; 111340064aecSDennis Zhou (Facebook) chunk->nr_empty_pop_pages = 111440064aecSDennis Zhou (Facebook) pcpu_cnt_pop_pages(chunk, start_offset / PCPU_MIN_ALLOC_SIZE, 111540064aecSDennis Zhou (Facebook) map_size / PCPU_MIN_ALLOC_SIZE); 111610edf5b0SDennis Zhou (Facebook) 111740064aecSDennis Zhou (Facebook) chunk->contig_bits = map_size / PCPU_MIN_ALLOC_SIZE; 111840064aecSDennis Zhou (Facebook) chunk->free_bytes = 
map_size; 1119c0ebfdc3SDennis Zhou (Facebook) 1120c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 1121c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 112240064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 112340064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits); 112440064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map); 112540064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map); 1126ca460b3cSDennis Zhou (Facebook) 112786b442fbSDennis Zhou (Facebook) chunk->first_bit = offset_bits; 112886b442fbSDennis Zhou (Facebook) 1129ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1130c0ebfdc3SDennis Zhou (Facebook) } 1131c0ebfdc3SDennis Zhou (Facebook) 11326b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 11336b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 113440064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 113540064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 113640064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits, 113740064aecSDennis Zhou (Facebook) offset_bits); 113840064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 113940064aecSDennis Zhou (Facebook) chunk->bound_map); 114040064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map); 11416b9d7c8eSDennis Zhou (Facebook) 1142ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1143ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits); 1144ca460b3cSDennis Zhou (Facebook) } 114540064aecSDennis Zhou (Facebook) 114610edf5b0SDennis Zhou (Facebook) return chunk; 114710edf5b0SDennis Zhou (Facebook) } 114810edf5b0SDennis Zhou (Facebook) 11496081089fSTejun Heo static struct pcpu_chunk *pcpu_alloc_chunk(void) 11506081089fSTejun Heo { 11516081089fSTejun Heo struct pcpu_chunk *chunk; 115240064aecSDennis Zhou (Facebook) int region_bits; 11536081089fSTejun Heo 115490459ce0SBob Liu chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); 11556081089fSTejun Heo if (!chunk) 11566081089fSTejun Heo return NULL; 11576081089fSTejun Heo 11586081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 1159c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 116040064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 116140064aecSDennis Zhou (Facebook) 116240064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 116340064aecSDennis Zhou (Facebook) sizeof(chunk->alloc_map[0])); 116440064aecSDennis Zhou (Facebook) if (!chunk->alloc_map) 116540064aecSDennis Zhou (Facebook) goto alloc_map_fail; 116640064aecSDennis Zhou (Facebook) 116740064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 116840064aecSDennis Zhou (Facebook) sizeof(chunk->bound_map[0])); 116940064aecSDennis Zhou (Facebook) if (!chunk->bound_map) 117040064aecSDennis Zhou (Facebook) goto bound_map_fail; 117140064aecSDennis Zhou (Facebook) 1172ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 1173ca460b3cSDennis Zhou (Facebook) sizeof(chunk->md_blocks[0])); 1174ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks) 1175ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail; 1176ca460b3cSDennis Zhou (Facebook) 1177ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 1178ca460b3cSDennis Zhou (Facebook) 117940064aecSDennis Zhou 
(Facebook) /* init metadata */ 118040064aecSDennis Zhou (Facebook) chunk->contig_bits = region_bits; 118140064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1182c0ebfdc3SDennis Zhou (Facebook) 11836081089fSTejun Heo return chunk; 118440064aecSDennis Zhou (Facebook) 1185ca460b3cSDennis Zhou (Facebook) md_blocks_fail: 1186ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 118740064aecSDennis Zhou (Facebook) bound_map_fail: 118840064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 118940064aecSDennis Zhou (Facebook) alloc_map_fail: 119040064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk); 119140064aecSDennis Zhou (Facebook) 119240064aecSDennis Zhou (Facebook) return NULL; 11936081089fSTejun Heo } 11946081089fSTejun Heo 11956081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 11966081089fSTejun Heo { 11976081089fSTejun Heo if (!chunk) 11986081089fSTejun Heo return; 119940064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 120040064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 12011d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 12026081089fSTejun Heo } 12036081089fSTejun Heo 1204b539b87fSTejun Heo /** 1205b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 1206b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated 1207b539b87fSTejun Heo * @page_start: the start page 1208b539b87fSTejun Heo * @page_end: the end page 120940064aecSDennis Zhou (Facebook) * @for_alloc: if this is to populate for allocation 1210b539b87fSTejun Heo * 1211b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update 1212b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each 1213b539b87fSTejun Heo * successful population. 121440064aecSDennis Zhou (Facebook) * 121540064aecSDennis Zhou (Facebook) * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it 121640064aecSDennis Zhou (Facebook) * is to serve an allocation in that area. 1217b539b87fSTejun Heo */ 121840064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, 121940064aecSDennis Zhou (Facebook) int page_end, bool for_alloc) 1220b539b87fSTejun Heo { 1221b539b87fSTejun Heo int nr = page_end - page_start; 1222b539b87fSTejun Heo 1223b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1224b539b87fSTejun Heo 1225b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr); 1226b539b87fSTejun Heo chunk->nr_populated += nr; 122740064aecSDennis Zhou (Facebook) 122840064aecSDennis Zhou (Facebook) if (!for_alloc) { 12290cecf50cSDennis Zhou (Facebook) chunk->nr_empty_pop_pages += nr; 1230b539b87fSTejun Heo pcpu_nr_empty_pop_pages += nr; 1231b539b87fSTejun Heo } 123240064aecSDennis Zhou (Facebook) } 1233b539b87fSTejun Heo 1234b539b87fSTejun Heo /** 1235b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping 1236b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated 1237b539b87fSTejun Heo * @page_start: the start page 1238b539b87fSTejun Heo * @page_end: the end page 1239b539b87fSTejun Heo * 1240b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk. 1241b539b87fSTejun Heo * Update the bookkeeping information accordingly. Must be called after 1242b539b87fSTejun Heo * each successful depopulation. 
1243b539b87fSTejun Heo */ 1244b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 1245b539b87fSTejun Heo int page_start, int page_end) 1246b539b87fSTejun Heo { 1247b539b87fSTejun Heo int nr = page_end - page_start; 1248b539b87fSTejun Heo 1249b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1250b539b87fSTejun Heo 1251b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr); 1252b539b87fSTejun Heo chunk->nr_populated -= nr; 12530cecf50cSDennis Zhou (Facebook) chunk->nr_empty_pop_pages -= nr; 1254b539b87fSTejun Heo pcpu_nr_empty_pop_pages -= nr; 1255b539b87fSTejun Heo } 1256b539b87fSTejun Heo 1257fbf59bc9STejun Heo /* 12589f645532STejun Heo * Chunk management implementation. 1259fbf59bc9STejun Heo * 12609f645532STejun Heo * To allow different implementations, chunk alloc/free and 12619f645532STejun Heo * [de]population are implemented in a separate file which is pulled 12629f645532STejun Heo * into this file and compiled together. The following functions 12639f645532STejun Heo * should be implemented. 1264ccea34b5STejun Heo * 12659f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk 12669f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk 12679f645532STejun Heo * pcpu_create_chunk - create a new chunk 12689f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 12699f645532STejun Heo * pcpu_addr_to_page - translate address to physical address 12709f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init 1271fbf59bc9STejun Heo */ 12729f645532STejun Heo static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size); 12739f645532STejun Heo static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size); 12749f645532STejun Heo static struct pcpu_chunk *pcpu_create_chunk(void); 12759f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 12769f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr); 12779f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 1278fbf59bc9STejun Heo 1279b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM 1280b0c9778bSTejun Heo #include "percpu-km.c" 1281b0c9778bSTejun Heo #else 12829f645532STejun Heo #include "percpu-vm.c" 1283b0c9778bSTejun Heo #endif 1284fbf59bc9STejun Heo 1285fbf59bc9STejun Heo /** 128688999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address 128788999a89STejun Heo * @addr: address for which the chunk needs to be determined. 128888999a89STejun Heo * 1289c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations. 1290c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator. 1291c0ebfdc3SDennis Zhou (Facebook) * 129288999a89STejun Heo * RETURNS: 129388999a89STejun Heo * The address of the found chunk. 129488999a89STejun Heo */ 129588999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 129688999a89STejun Heo { 1297c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */ 1298560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) 1299c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk; 1300c0ebfdc3SDennis Zhou (Facebook) 1301c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? 
*/ 1302560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) 130388999a89STejun Heo return pcpu_reserved_chunk; 130488999a89STejun Heo 130588999a89STejun Heo /* 130688999a89STejun Heo * The address is relative to unit0 which might be unused and 130788999a89STejun Heo * thus unmapped. Offset the address to the unit space of the 130888999a89STejun Heo * current processor before looking it up in the vmalloc 130988999a89STejun Heo * space. Note that any possible cpu id can be used here, so 131088999a89STejun Heo * there's no need to worry about preemption or cpu hotplug. 131188999a89STejun Heo */ 131288999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 13139f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 131488999a89STejun Heo } 131588999a89STejun Heo 131688999a89STejun Heo /** 1317edcb4639STejun Heo * pcpu_alloc - the percpu allocator 1318cae3aeb8STejun Heo * @size: size of area to allocate in bytes 1319fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 1320edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 13215835d96eSTejun Heo * @gfp: allocation flags 1322fbf59bc9STejun Heo * 13235835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 13245835d96eSTejun Heo * contain %GFP_KERNEL, the allocation is atomic. 1325fbf59bc9STejun Heo * 1326fbf59bc9STejun Heo * RETURNS: 1327fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1328fbf59bc9STejun Heo */ 13295835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 13305835d96eSTejun Heo gfp_t gfp) 1331fbf59bc9STejun Heo { 1332f2badb0cSTejun Heo static int warn_limit = 10; 1333fbf59bc9STejun Heo struct pcpu_chunk *chunk; 1334f2badb0cSTejun Heo const char *err; 13356ae833c7STejun Heo bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 133640064aecSDennis Zhou (Facebook) int slot, off, cpu, ret; 1337403a91b1SJiri Kosina unsigned long flags; 1338f528f0b8SCatalin Marinas void __percpu *ptr; 133940064aecSDennis Zhou (Facebook) size_t bits, bit_align; 1340fbf59bc9STejun Heo 1341723ad1d9SAl Viro /* 134240064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 134340064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes. 134440064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up 134540064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 
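	 *
	 * For example, with a 4 byte PCPU_MIN_ALLOC_SIZE, a 6 byte request is
	 * rounded up to 8 bytes (two bits in the allocation map), leaving two
	 * bytes of internal fragmentation.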
1346723ad1d9SAl Viro */ 1347d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1348d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE; 1349723ad1d9SAl Viro 1350d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 135140064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT; 135240064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 13532f69fa82SViro 13543ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 13553ca45a46Szijun_hu !is_power_of_2(align))) { 1356756a025fSJoe Perches WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1357756a025fSJoe Perches size, align); 1358fbf59bc9STejun Heo return NULL; 1359fbf59bc9STejun Heo } 1360fbf59bc9STejun Heo 13616710e594STejun Heo if (!is_atomic) 13626710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 13636710e594STejun Heo 1364403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1365fbf59bc9STejun Heo 1366edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 1367edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 1368edcb4639STejun Heo chunk = pcpu_reserved_chunk; 1369833af842STejun Heo 137040064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 137140064aecSDennis Zhou (Facebook) if (off < 0) { 1372833af842STejun Heo err = "alloc from reserved chunk failed"; 1373ccea34b5STejun Heo goto fail_unlock; 1374f2badb0cSTejun Heo } 1375833af842STejun Heo 137640064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1377edcb4639STejun Heo if (off >= 0) 1378edcb4639STejun Heo goto area_found; 1379833af842STejun Heo 1380f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1381ccea34b5STejun Heo goto fail_unlock; 1382edcb4639STejun Heo } 1383edcb4639STejun Heo 1384ccea34b5STejun Heo restart: 1385edcb4639STejun Heo /* search through normal chunks */ 1386fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 1387fbf59bc9STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 138840064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, 138940064aecSDennis Zhou (Facebook) is_atomic); 139040064aecSDennis Zhou (Facebook) if (off < 0) 1391fbf59bc9STejun Heo continue; 1392ccea34b5STejun Heo 139340064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1394fbf59bc9STejun Heo if (off >= 0) 1395fbf59bc9STejun Heo goto area_found; 139640064aecSDennis Zhou (Facebook) 1397fbf59bc9STejun Heo } 1398fbf59bc9STejun Heo } 1399fbf59bc9STejun Heo 1400403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1401ccea34b5STejun Heo 1402b38d08f3STejun Heo /* 1403b38d08f3STejun Heo * No space left. Create a new chunk. We don't want multiple 1404b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 1405b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 
1406b38d08f3STejun Heo */ 140711df02bfSDennis Zhou if (is_atomic) { 140811df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 14095835d96eSTejun Heo goto fail; 141011df02bfSDennis Zhou } 14115835d96eSTejun Heo 1412b38d08f3STejun Heo if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 14136081089fSTejun Heo chunk = pcpu_create_chunk(); 1414f2badb0cSTejun Heo if (!chunk) { 1415f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1416b38d08f3STejun Heo goto fail; 1417f2badb0cSTejun Heo } 1418ccea34b5STejun Heo 1419403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1420fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1421b38d08f3STejun Heo } else { 1422b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1423b38d08f3STejun Heo } 1424b38d08f3STejun Heo 1425ccea34b5STejun Heo goto restart; 1426fbf59bc9STejun Heo 1427fbf59bc9STejun Heo area_found: 142830a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1429403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1430ccea34b5STejun Heo 1431dca49645STejun Heo /* populate if not all pages are already there */ 14325835d96eSTejun Heo if (!is_atomic) { 1433e04d3208STejun Heo int page_start, page_end, rs, re; 1434e04d3208STejun Heo 1435dca49645STejun Heo page_start = PFN_DOWN(off); 1436dca49645STejun Heo page_end = PFN_UP(off + size); 1437dca49645STejun Heo 143891e914c5SDennis Zhou (Facebook) pcpu_for_each_unpop_region(chunk->populated, rs, re, 143991e914c5SDennis Zhou (Facebook) page_start, page_end) { 1440dca49645STejun Heo WARN_ON(chunk->immutable); 1441dca49645STejun Heo 1442b38d08f3STejun Heo ret = pcpu_populate_chunk(chunk, rs, re); 1443b38d08f3STejun Heo 1444403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1445b38d08f3STejun Heo if (ret) { 144640064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1447f2badb0cSTejun Heo err = "failed to populate"; 1448ccea34b5STejun Heo goto fail_unlock; 1449fbf59bc9STejun Heo } 145040064aecSDennis Zhou (Facebook) pcpu_chunk_populated(chunk, rs, re, true); 1451b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1452dca49645STejun Heo } 1453dca49645STejun Heo 1454ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1455e04d3208STejun Heo } 1456ccea34b5STejun Heo 14571a4d7607STejun Heo if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 14581a4d7607STejun Heo pcpu_schedule_balance_work(); 14591a4d7607STejun Heo 1460dca49645STejun Heo /* clear the areas and return address relative to base address */ 1461dca49645STejun Heo for_each_possible_cpu(cpu) 1462dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1463dca49645STejun Heo 1464f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 14658a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp); 1466df95e795SDennis Zhou 1467df95e795SDennis Zhou trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1468df95e795SDennis Zhou chunk->base_addr, off, ptr); 1469df95e795SDennis Zhou 1470f528f0b8SCatalin Marinas return ptr; 1471ccea34b5STejun Heo 1472ccea34b5STejun Heo fail_unlock: 1473403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1474b38d08f3STejun Heo fail: 1475df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1476df95e795SDennis Zhou 14775835d96eSTejun Heo if (!is_atomic && warn_limit) { 1478870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 14795835d96eSTejun Heo size, align, is_atomic, err); 1480f2badb0cSTejun Heo dump_stack(); 1481f2badb0cSTejun Heo if 
(!--warn_limit)
1482870d4b12SJoe Perches 			pr_info("limit reached, disable warning\n");
1483f2badb0cSTejun Heo 	}
14841a4d7607STejun Heo 	if (is_atomic) {
14851a4d7607STejun Heo 		/* see the flag handling in pcpu_balance_workfn() */
14861a4d7607STejun Heo 		pcpu_atomic_alloc_failed = true;
14871a4d7607STejun Heo 		pcpu_schedule_balance_work();
14886710e594STejun Heo 	} else {
14896710e594STejun Heo 		mutex_unlock(&pcpu_alloc_mutex);
14901a4d7607STejun Heo 	}
1491ccea34b5STejun Heo 	return NULL;
1492fbf59bc9STejun Heo }
1493edcb4639STejun Heo 
1494edcb4639STejun Heo /**
14955835d96eSTejun Heo  * __alloc_percpu_gfp - allocate dynamic percpu area
1496edcb4639STejun Heo  * @size: size of area to allocate in bytes
1497edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
14985835d96eSTejun Heo  * @gfp: allocation flags
1499edcb4639STejun Heo  *
15005835d96eSTejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
15015835d96eSTejun Heo  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
15025835d96eSTejun Heo  * be called from any context but is a lot more likely to fail.
1503ccea34b5STejun Heo  *
1504edcb4639STejun Heo  * RETURNS:
1505edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
1506edcb4639STejun Heo  */
15075835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
15085835d96eSTejun Heo {
15095835d96eSTejun Heo 	return pcpu_alloc(size, align, false, gfp);
15105835d96eSTejun Heo }
15115835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
15125835d96eSTejun Heo 
15135835d96eSTejun Heo /**
15145835d96eSTejun Heo  * __alloc_percpu - allocate dynamic percpu area
15155835d96eSTejun Heo  * @size: size of area to allocate in bytes
15165835d96eSTejun Heo  * @align: alignment of area (max PAGE_SIZE)
15175835d96eSTejun Heo  *
15185835d96eSTejun Heo  * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
15195835d96eSTejun Heo  */
152043cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align)
1521edcb4639STejun Heo {
15225835d96eSTejun Heo 	return pcpu_alloc(size, align, false, GFP_KERNEL);
1523edcb4639STejun Heo }
1524fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu);
1525fbf59bc9STejun Heo 
1526edcb4639STejun Heo /**
1527edcb4639STejun Heo  * __alloc_reserved_percpu - allocate reserved percpu area
1528edcb4639STejun Heo  * @size: size of area to allocate in bytes
1529edcb4639STejun Heo  * @align: alignment of area (max PAGE_SIZE)
1530edcb4639STejun Heo  *
15319329ba97STejun Heo  * Allocate zero-filled percpu area of @size bytes aligned at @align
15329329ba97STejun Heo  * from the reserved percpu area if the arch has set it up; otherwise,
15339329ba97STejun Heo  * allocation is served from the same dynamic area.  Might sleep.
15349329ba97STejun Heo  * Might trigger writeouts.
1535edcb4639STejun Heo  *
1536ccea34b5STejun Heo  * CONTEXT:
1537ccea34b5STejun Heo  * Does GFP_KERNEL allocation.
1538ccea34b5STejun Heo  *
1539edcb4639STejun Heo  * RETURNS:
1540edcb4639STejun Heo  * Percpu pointer to the allocated area on success, NULL on failure.
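 *
 * A minimal calling sketch (illustrative; the main in-tree caller is the
 * module loader carving out module static percpu space):
 *
 *	void __percpu *p = __alloc_reserved_percpu(size, align);
 *
 *	if (!p)
 *		return -ENOMEM;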
1541edcb4639STejun Heo */ 154243cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1543edcb4639STejun Heo { 15445835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1545edcb4639STejun Heo } 1546edcb4639STejun Heo 1547a56dbddfSTejun Heo /** 15481a4d7607STejun Heo * pcpu_balance_workfn - manage the amount of free chunks and populated pages 1549a56dbddfSTejun Heo * @work: unused 1550a56dbddfSTejun Heo * 1551a56dbddfSTejun Heo * Reclaim all fully free chunks except for the first one. 1552a56dbddfSTejun Heo */ 1553fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work) 1554fbf59bc9STejun Heo { 1555fe6bd8c3STejun Heo LIST_HEAD(to_free); 1556fe6bd8c3STejun Heo struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; 1557a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 15581a4d7607STejun Heo int slot, nr_to_pop, ret; 1559a56dbddfSTejun Heo 15601a4d7607STejun Heo /* 15611a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 15621a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 15631a4d7607STejun Heo */ 1564ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1565ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1566a56dbddfSTejun Heo 1567fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 15688d408b4bSTejun Heo WARN_ON(chunk->immutable); 1569a56dbddfSTejun Heo 1570a56dbddfSTejun Heo /* spare the first one */ 1571fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1572a56dbddfSTejun Heo continue; 1573a56dbddfSTejun Heo 1574fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 1575a56dbddfSTejun Heo } 1576a56dbddfSTejun Heo 1577ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1578a56dbddfSTejun Heo 1579fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 1580a93ace48STejun Heo int rs, re; 1581dca49645STejun Heo 158291e914c5SDennis Zhou (Facebook) pcpu_for_each_pop_region(chunk->populated, rs, re, 0, 158391e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 1584a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 1585b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 1586b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 1587b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 1588a93ace48STejun Heo } 15896081089fSTejun Heo pcpu_destroy_chunk(chunk); 1590fbf59bc9STejun Heo } 1591971f3918STejun Heo 15921a4d7607STejun Heo /* 15931a4d7607STejun Heo * Ensure there are certain number of free populated pages for 15941a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 15951a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 15961a4d7607STejun Heo * failed previously, always populate the maximum amount. This 15971a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping 15981a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not 15991a4d7607STejun Heo * something we support properly and can be highly unreliable and 16001a4d7607STejun Heo * inefficient. 
16011a4d7607STejun Heo */ 16021a4d7607STejun Heo retry_pop: 16031a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 16041a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 16051a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 16061a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 16071a4d7607STejun Heo } else { 16081a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 16091a4d7607STejun Heo pcpu_nr_empty_pop_pages, 16101a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 16111a4d7607STejun Heo } 16121a4d7607STejun Heo 16131a4d7607STejun Heo for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 16141a4d7607STejun Heo int nr_unpop = 0, rs, re; 16151a4d7607STejun Heo 16161a4d7607STejun Heo if (!nr_to_pop) 16171a4d7607STejun Heo break; 16181a4d7607STejun Heo 16191a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 16201a4d7607STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 16218ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated; 16221a4d7607STejun Heo if (nr_unpop) 16231a4d7607STejun Heo break; 16241a4d7607STejun Heo } 16251a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 16261a4d7607STejun Heo 16271a4d7607STejun Heo if (!nr_unpop) 16281a4d7607STejun Heo continue; 16291a4d7607STejun Heo 16301a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 163191e914c5SDennis Zhou (Facebook) pcpu_for_each_unpop_region(chunk->populated, rs, re, 0, 163291e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 16331a4d7607STejun Heo int nr = min(re - rs, nr_to_pop); 16341a4d7607STejun Heo 16351a4d7607STejun Heo ret = pcpu_populate_chunk(chunk, rs, rs + nr); 16361a4d7607STejun Heo if (!ret) { 16371a4d7607STejun Heo nr_to_pop -= nr; 16381a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 163940064aecSDennis Zhou (Facebook) pcpu_chunk_populated(chunk, rs, rs + nr, false); 16401a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 16411a4d7607STejun Heo } else { 16421a4d7607STejun Heo nr_to_pop = 0; 16431a4d7607STejun Heo } 16441a4d7607STejun Heo 16451a4d7607STejun Heo if (!nr_to_pop) 16461a4d7607STejun Heo break; 16471a4d7607STejun Heo } 16481a4d7607STejun Heo } 16491a4d7607STejun Heo 16501a4d7607STejun Heo if (nr_to_pop) { 16511a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 16521a4d7607STejun Heo chunk = pcpu_create_chunk(); 16531a4d7607STejun Heo if (chunk) { 16541a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 16551a4d7607STejun Heo pcpu_chunk_relocate(chunk, -1); 16561a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 16571a4d7607STejun Heo goto retry_pop; 16581a4d7607STejun Heo } 16591a4d7607STejun Heo } 16601a4d7607STejun Heo 1661971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1662a56dbddfSTejun Heo } 1663fbf59bc9STejun Heo 1664fbf59bc9STejun Heo /** 1665fbf59bc9STejun Heo * free_percpu - free percpu area 1666fbf59bc9STejun Heo * @ptr: pointer to area to free 1667fbf59bc9STejun Heo * 1668ccea34b5STejun Heo * Free percpu area @ptr. 1669ccea34b5STejun Heo * 1670ccea34b5STejun Heo * CONTEXT: 1671ccea34b5STejun Heo * Can be called from atomic context. 
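 *
 * A minimal usage sketch (illustrative only):
 *
 *	int __percpu *cnt = alloc_percpu(int);
 *	int cpu, sum = 0;
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(cnt, cpu);
 *	free_percpu(cnt);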
1672fbf59bc9STejun Heo */ 167343cf38ebSTejun Heo void free_percpu(void __percpu *ptr) 1674fbf59bc9STejun Heo { 1675129182e5SAndrew Morton void *addr; 1676fbf59bc9STejun Heo struct pcpu_chunk *chunk; 1677ccea34b5STejun Heo unsigned long flags; 167840064aecSDennis Zhou (Facebook) int off; 1679fbf59bc9STejun Heo 1680fbf59bc9STejun Heo if (!ptr) 1681fbf59bc9STejun Heo return; 1682fbf59bc9STejun Heo 1683f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr); 1684f528f0b8SCatalin Marinas 1685129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr); 1686129182e5SAndrew Morton 1687ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1688fbf59bc9STejun Heo 1689fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 1690bba174f5STejun Heo off = addr - chunk->base_addr; 1691fbf59bc9STejun Heo 169240064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1693fbf59bc9STejun Heo 1694a56dbddfSTejun Heo /* if there are more than one fully free chunks, wake up grim reaper */ 169540064aecSDennis Zhou (Facebook) if (chunk->free_bytes == pcpu_unit_size) { 1696fbf59bc9STejun Heo struct pcpu_chunk *pos; 1697fbf59bc9STejun Heo 1698a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1699fbf59bc9STejun Heo if (pos != chunk) { 17001a4d7607STejun Heo pcpu_schedule_balance_work(); 1701fbf59bc9STejun Heo break; 1702fbf59bc9STejun Heo } 1703fbf59bc9STejun Heo } 1704fbf59bc9STejun Heo 1705df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr); 1706df95e795SDennis Zhou 1707ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1708fbf59bc9STejun Heo } 1709fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 1710fbf59bc9STejun Heo 1711383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 1712383776faSThomas Gleixner { 1713383776faSThomas Gleixner #ifdef CONFIG_SMP 1714383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start; 1715383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 1716383776faSThomas Gleixner unsigned int cpu; 1717383776faSThomas Gleixner 1718383776faSThomas Gleixner for_each_possible_cpu(cpu) { 1719383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu); 1720383776faSThomas Gleixner void *va = (void *)addr; 1721383776faSThomas Gleixner 1722383776faSThomas Gleixner if (va >= start && va < start + static_size) { 17238ce371f9SPeter Zijlstra if (can_addr) { 1724383776faSThomas Gleixner *can_addr = (unsigned long) (va - start); 17258ce371f9SPeter Zijlstra *can_addr += (unsigned long) 17268ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id()); 17278ce371f9SPeter Zijlstra } 1728383776faSThomas Gleixner return true; 1729383776faSThomas Gleixner } 1730383776faSThomas Gleixner } 1731383776faSThomas Gleixner #endif 1732383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */ 1733383776faSThomas Gleixner return false; 1734383776faSThomas Gleixner } 1735383776faSThomas Gleixner 17363b034b0dSVivek Goyal /** 173710fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 173810fad5e4STejun Heo * @addr: address to test 173910fad5e4STejun Heo * 174010fad5e4STejun Heo * Test whether @addr belongs to in-kernel static percpu area. Module 174110fad5e4STejun Heo * static percpu areas are not considered. For those, use 174210fad5e4STejun Heo * is_module_percpu_address(). 
174310fad5e4STejun Heo * 174410fad5e4STejun Heo * RETURNS: 174510fad5e4STejun Heo * %true if @addr is from in-kernel static percpu area, %false otherwise. 174610fad5e4STejun Heo */ 174710fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 174810fad5e4STejun Heo { 1749383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL); 175010fad5e4STejun Heo } 175110fad5e4STejun Heo 175210fad5e4STejun Heo /** 17533b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 17543b034b0dSVivek Goyal * @addr: the address to be converted to physical address 17553b034b0dSVivek Goyal * 17563b034b0dSVivek Goyal * Given @addr which is dereferenceable address obtained via one of 17573b034b0dSVivek Goyal * percpu access macros, this function translates it into its physical 17583b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 17593b034b0dSVivek Goyal * until this function finishes. 17603b034b0dSVivek Goyal * 176167589c71SDave Young * percpu allocator has special setup for the first chunk, which currently 176267589c71SDave Young * supports either embedding in linear address space or vmalloc mapping, 176367589c71SDave Young * and, from the second one, the backing allocator (currently either vm or 176467589c71SDave Young * km) provides translation. 176567589c71SDave Young * 1766bffc4375SYannick Guerrini * The addr can be translated simply without checking if it falls into the 176767589c71SDave Young * first chunk. But the current code reflects better how percpu allocator 176867589c71SDave Young * actually works, and the verification can discover both bugs in percpu 176967589c71SDave Young * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current 177067589c71SDave Young * code. 177167589c71SDave Young * 17723b034b0dSVivek Goyal * RETURNS: 17733b034b0dSVivek Goyal * The physical address for @addr. 17743b034b0dSVivek Goyal */ 17753b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 17763b034b0dSVivek Goyal { 17779983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 17789983b6f0STejun Heo bool in_first_chunk = false; 1779a855b84cSTejun Heo unsigned long first_low, first_high; 17809983b6f0STejun Heo unsigned int cpu; 17819983b6f0STejun Heo 17829983b6f0STejun Heo /* 1783a855b84cSTejun Heo * The following test on unit_low/high isn't strictly 17849983b6f0STejun Heo * necessary but will speed up lookups of addresses which 17859983b6f0STejun Heo * aren't in the first chunk. 1786c0ebfdc3SDennis Zhou (Facebook) * 1787c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr 1788c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the 1789c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may 1790c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size). 
17919983b6f0STejun Heo */ 1792c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 1793c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 1794c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 1795c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 1796a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 1797a855b84cSTejun Heo (unsigned long)addr < first_high) { 17989983b6f0STejun Heo for_each_possible_cpu(cpu) { 17999983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 18009983b6f0STejun Heo 18019983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 18029983b6f0STejun Heo in_first_chunk = true; 18039983b6f0STejun Heo break; 18049983b6f0STejun Heo } 18059983b6f0STejun Heo } 18069983b6f0STejun Heo } 18079983b6f0STejun Heo 18089983b6f0STejun Heo if (in_first_chunk) { 1809eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 18103b034b0dSVivek Goyal return __pa(addr); 18113b034b0dSVivek Goyal else 18129f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 18139f57bd4dSEugene Surovegin offset_in_page(addr); 1814020ec653STejun Heo } else 18159f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 18169f57bd4dSEugene Surovegin offset_in_page(addr); 18173b034b0dSVivek Goyal } 18183b034b0dSVivek Goyal 1819fbf59bc9STejun Heo /** 1820fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 1821fd1e8a1fSTejun Heo * @nr_groups: the number of groups 1822fd1e8a1fSTejun Heo * @nr_units: the number of units 1823033e48fbSTejun Heo * 1824fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 1825fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 1826fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 1827fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 1828fd1e8a1fSTejun Heo * pointer of other groups. 1829033e48fbSTejun Heo * 1830033e48fbSTejun Heo * RETURNS: 1831fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 1832fd1e8a1fSTejun Heo * failure. 
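 *
 * A sketch of typical use by first-chunk setup code, assuming a single
 * group covering all possible CPUs (illustrative only):
 *
 *	struct pcpu_alloc_info *ai;
 *	unsigned int cpu, unit = 0;
 *
 *	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
 *	if (!ai)
 *		return ERR_PTR(-ENOMEM);
 *	ai->groups[0].nr_units = num_possible_cpus();
 *	for_each_possible_cpu(cpu)
 *		ai->groups[0].cpu_map[unit++] = cpu;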
1833033e48fbSTejun Heo */ 1834fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 1835fd1e8a1fSTejun Heo int nr_units) 1836fd1e8a1fSTejun Heo { 1837fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 1838fd1e8a1fSTejun Heo size_t base_size, ai_size; 1839fd1e8a1fSTejun Heo void *ptr; 1840fd1e8a1fSTejun Heo int unit; 1841fd1e8a1fSTejun Heo 1842fd1e8a1fSTejun Heo base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 1843fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 1844fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 1845fd1e8a1fSTejun Heo 1846999c17e3SSantosh Shilimkar ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); 1847fd1e8a1fSTejun Heo if (!ptr) 1848fd1e8a1fSTejun Heo return NULL; 1849fd1e8a1fSTejun Heo ai = ptr; 1850fd1e8a1fSTejun Heo ptr += base_size; 1851fd1e8a1fSTejun Heo 1852fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 1853fd1e8a1fSTejun Heo 1854fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 1855fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 1856fd1e8a1fSTejun Heo 1857fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 1858fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 1859fd1e8a1fSTejun Heo 1860fd1e8a1fSTejun Heo return ai; 1861fd1e8a1fSTejun Heo } 1862fd1e8a1fSTejun Heo 1863fd1e8a1fSTejun Heo /** 1864fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 1865fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 1866fd1e8a1fSTejun Heo * 1867fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 1868fd1e8a1fSTejun Heo */ 1869fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 1870fd1e8a1fSTejun Heo { 1871999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 1872fd1e8a1fSTejun Heo } 1873fd1e8a1fSTejun Heo 1874fd1e8a1fSTejun Heo /** 1875fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1876fd1e8a1fSTejun Heo * @lvl: loglevel 1877fd1e8a1fSTejun Heo * @ai: allocation info to dump 1878fd1e8a1fSTejun Heo * 1879fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 
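 *
 * The summary line comes out roughly as (values illustrative):
 *
 *	pcpu-alloc: s145408 r8192 d28672 u262144 alloc=1*262144
 *
 * followed by the group/unit to cpu map.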
1880fd1e8a1fSTejun Heo  */
1881fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl,
1882fd1e8a1fSTejun Heo 				 const struct pcpu_alloc_info *ai)
1883033e48fbSTejun Heo {
1884fd1e8a1fSTejun Heo 	int group_width = 1, cpu_width = 1, width;
1885033e48fbSTejun Heo 	char empty_str[] = "--------";
1886fd1e8a1fSTejun Heo 	int alloc = 0, alloc_end = 0;
1887fd1e8a1fSTejun Heo 	int group, v;
1888fd1e8a1fSTejun Heo 	int upa, apl;	/* units per alloc, allocs per line */
1889033e48fbSTejun Heo 
1890fd1e8a1fSTejun Heo 	v = ai->nr_groups;
1891033e48fbSTejun Heo 	while (v /= 10)
1892fd1e8a1fSTejun Heo 		group_width++;
1893033e48fbSTejun Heo 
1894fd1e8a1fSTejun Heo 	v = num_possible_cpus();
1895fd1e8a1fSTejun Heo 	while (v /= 10)
1896fd1e8a1fSTejun Heo 		cpu_width++;
1897fd1e8a1fSTejun Heo 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1898033e48fbSTejun Heo 
1899fd1e8a1fSTejun Heo 	upa = ai->alloc_size / ai->unit_size;
1900fd1e8a1fSTejun Heo 	width = upa * (cpu_width + 1) + group_width + 3;
1901fd1e8a1fSTejun Heo 	apl = rounddown_pow_of_two(max(60 / width, 1));
1902033e48fbSTejun Heo 
1903fd1e8a1fSTejun Heo 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1904fd1e8a1fSTejun Heo 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1905fd1e8a1fSTejun Heo 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1906fd1e8a1fSTejun Heo 
1907fd1e8a1fSTejun Heo 	for (group = 0; group < ai->nr_groups; group++) {
1908fd1e8a1fSTejun Heo 		const struct pcpu_group_info *gi = &ai->groups[group];
1909fd1e8a1fSTejun Heo 		int unit = 0, unit_end = 0;
1910fd1e8a1fSTejun Heo 
1911fd1e8a1fSTejun Heo 		BUG_ON(gi->nr_units % upa);
1912fd1e8a1fSTejun Heo 		for (alloc_end += gi->nr_units / upa;
1913fd1e8a1fSTejun Heo 		     alloc < alloc_end; alloc++) {
1914fd1e8a1fSTejun Heo 			if (!(alloc % apl)) {
19151170532bSJoe Perches 				pr_cont("\n");
1916fd1e8a1fSTejun Heo 				printk("%spcpu-alloc: ", lvl);
1917033e48fbSTejun Heo 			}
19181170532bSJoe Perches 			pr_cont("[%0*d] ", group_width, group);
1919fd1e8a1fSTejun Heo 
1920fd1e8a1fSTejun Heo 			for (unit_end += upa; unit < unit_end; unit++)
1921fd1e8a1fSTejun Heo 				if (gi->cpu_map[unit] != NR_CPUS)
19221170532bSJoe Perches 					pr_cont("%0*d ",
19231170532bSJoe Perches 						cpu_width, gi->cpu_map[unit]);
1924033e48fbSTejun Heo 				else
19251170532bSJoe Perches 					pr_cont("%s ", empty_str);
1926033e48fbSTejun Heo 		}
1927fd1e8a1fSTejun Heo 	}
19281170532bSJoe Perches 	pr_cont("\n");
1929033e48fbSTejun Heo }
1930033e48fbSTejun Heo 
1931fbf59bc9STejun Heo /**
19328d408b4bSTejun Heo  * pcpu_setup_first_chunk - initialize the first percpu chunk
1933fd1e8a1fSTejun Heo  * @ai: pcpu_alloc_info describing how the percpu area is shaped
193438a6be52STejun Heo  * @base_addr: mapped address
1935fbf59bc9STejun Heo  *
19368d408b4bSTejun Heo  * Initialize the first percpu chunk which contains the kernel static
19378d408b4bSTejun Heo  * percpu area.  This function is to be called from the arch percpu area
193838a6be52STejun Heo  * setup path.
19398d408b4bSTejun Heo  *
1940fd1e8a1fSTejun Heo  * @ai contains all information necessary to initialize the first
1941fd1e8a1fSTejun Heo  * chunk and prime the dynamic percpu allocator.
19428d408b4bSTejun Heo  *
1943fd1e8a1fSTejun Heo  * @ai->static_size is the size of the static percpu area.
1944fd1e8a1fSTejun Heo  *
1945fd1e8a1fSTejun Heo  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1946edcb4639STejun Heo  * reserve after the static area in the first chunk.
This reserves 1947edcb4639STejun Heo * the first chunk such that it's available only through reserved 1948edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 1949edcb4639STejun Heo * static areas on architectures where the addressing model has 1950edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 1951edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 1952edcb4639STejun Heo * 1953fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 1954fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 1955fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 19566074d5b0STejun Heo * 1957fd1e8a1fSTejun Heo * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 1958fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 1959fd1e8a1fSTejun Heo * @ai->dyn_size. 19608d408b4bSTejun Heo * 1961fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and used as alignment 1962fd1e8a1fSTejun Heo * for vm areas. 19638d408b4bSTejun Heo * 1964fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and always multiple of 1965fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 1966fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 1967fd1e8a1fSTejun Heo * 1968fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe virtual memory layout of 1969fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 1970fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 1971fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 1972fd1e8a1fSTejun Heo * all units is assumed. 19738d408b4bSTejun Heo * 197438a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 197538a6be52STejun Heo * copied static data to each unit. 1976fbf59bc9STejun Heo * 1977c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 1978c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 1979c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 1980c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 1981c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 1982c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 1983c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 1984edcb4639STejun Heo * 1985fbf59bc9STejun Heo * RETURNS: 1986fb435d52STejun Heo * 0 on success, -errno on failure. 
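 *
 * A rough sketch of a caller, loosely following the !CONFIG_SMP
 * setup_per_cpu_areas() path (most SMP architectures instead use the
 * generic pcpu_embed_first_chunk()/pcpu_page_first_chunk() helpers, which
 * build @ai and the mapping before calling here); sizes are illustrative:
 *
 *	const size_t unit_size =
 *		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
 *					 PERCPU_DYNAMIC_RESERVE));
 *	struct pcpu_alloc_info *ai = pcpu_alloc_alloc_info(1, 1);
 *	void *fc = memblock_virt_alloc(unit_size, PAGE_SIZE);
 *
 *	ai->dyn_size = unit_size;
 *	ai->unit_size = unit_size;
 *	ai->atom_size = unit_size;
 *	ai->alloc_size = unit_size;
 *	ai->groups[0].nr_units = 1;
 *	ai->groups[0].cpu_map[0] = 0;
 *
 *	if (pcpu_setup_first_chunk(ai, fc) < 0)
 *		panic("Failed to initialize percpu areas.");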
1987fbf59bc9STejun Heo */ 1988fb435d52STejun Heo int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1989fd1e8a1fSTejun Heo void *base_addr) 1990fbf59bc9STejun Heo { 1991b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1992d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size; 19930c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 19946563297cSTejun Heo unsigned long *group_offsets; 19956563297cSTejun Heo size_t *group_sizes; 1996fb435d52STejun Heo unsigned long *unit_off; 1997fbf59bc9STejun Heo unsigned int cpu; 1998fd1e8a1fSTejun Heo int *unit_map; 1999fd1e8a1fSTejun Heo int group, unit, i; 2000c0ebfdc3SDennis Zhou (Facebook) int map_size; 2001c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 2002fbf59bc9STejun Heo 2003635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 2004635b75fcSTejun Heo if (unlikely(cond)) { \ 2005870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 2006870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 2007807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 2008635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2009635b75fcSTejun Heo BUG(); \ 2010635b75fcSTejun Heo } \ 2011635b75fcSTejun Heo } while (0) 2012635b75fcSTejun Heo 20132f39e637STejun Heo /* sanity checks */ 2014635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2015bbddff05STejun Heo #ifdef CONFIG_SMP 2016635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 2017f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2018bbddff05STejun Heo #endif 2019635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 2020f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2021635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2022f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2023635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2024ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2025099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2026fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 2027d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2028ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2029ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 20309f645532STejun Heo PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 20318d408b4bSTejun Heo 20326563297cSTejun Heo /* process group information and build config tables accordingly */ 2033999c17e3SSantosh Shilimkar group_offsets = memblock_virt_alloc(ai->nr_groups * 2034999c17e3SSantosh Shilimkar sizeof(group_offsets[0]), 0); 2035999c17e3SSantosh Shilimkar group_sizes = memblock_virt_alloc(ai->nr_groups * 2036999c17e3SSantosh Shilimkar sizeof(group_sizes[0]), 0); 2037999c17e3SSantosh Shilimkar unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 2038999c17e3SSantosh Shilimkar unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 20392f39e637STejun Heo 2040fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2041ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 2042a855b84cSTejun Heo 2043a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 2044a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 20452f39e637STejun Heo 2046fd1e8a1fSTejun Heo for (group = 0, unit = 0; 
group < ai->nr_groups; group++, unit += i) { 2047fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 20482f39e637STejun Heo 20496563297cSTejun Heo group_offsets[group] = gi->base_offset; 20506563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 20516563297cSTejun Heo 2052fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 2053fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 2054fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 2055fd1e8a1fSTejun Heo continue; 2056fd1e8a1fSTejun Heo 20579f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2058635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2059635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2060fd1e8a1fSTejun Heo 2061fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 2062fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2063fb435d52STejun Heo 2064a855b84cSTejun Heo /* determine low/high unit_cpu */ 2065a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 2066a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2067a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 2068a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 2069a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2070a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 20710fc0531eSLinus Torvalds } 20720fc0531eSLinus Torvalds } 2073fd1e8a1fSTejun Heo pcpu_nr_units = unit; 20742f39e637STejun Heo 20752f39e637STejun Heo for_each_possible_cpu(cpu) 2076635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2077635b75fcSTejun Heo 2078635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 2079635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 2080bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 20812f39e637STejun Heo 20826563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 20836563297cSTejun Heo pcpu_group_offsets = group_offsets; 20846563297cSTejun Heo pcpu_group_sizes = group_sizes; 2085fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 2086fb435d52STejun Heo pcpu_unit_offsets = unit_off; 20872f39e637STejun Heo 20882f39e637STejun Heo /* determine basic parameters */ 2089fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2090d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 20916563297cSTejun Heo pcpu_atom_size = ai->atom_size; 2092ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 2093ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 2094cafe8816STejun Heo 209530a5b536SDennis Zhou pcpu_stats_save_ai(ai); 209630a5b536SDennis Zhou 2097d9b55eebSTejun Heo /* 2098d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 2099d9b55eebSTejun Heo * empty chunks. 2100d9b55eebSTejun Heo */ 2101d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 2102999c17e3SSantosh Shilimkar pcpu_slot = memblock_virt_alloc( 2103999c17e3SSantosh Shilimkar pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 2104fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 2105fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 2106fbf59bc9STejun Heo 2107edcb4639STejun Heo /* 2108d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the 2109d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and 2110d2f3c384SDennis Zhou (Facebook) * dynamic region. 
The first chunk ends page aligned by 2111d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region 2112d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the 2113d2f3c384SDennis Zhou (Facebook) * configured sizes. 2114d2f3c384SDennis Zhou (Facebook) */ 2115d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2116d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size); 2117d2f3c384SDennis Zhou (Facebook) 2118d2f3c384SDennis Zhou (Facebook) /* 2119c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 2120c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 2121c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 2122c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 2123c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 2124c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 2125edcb4639STejun Heo */ 2126d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size; 2127d2f3c384SDennis Zhou (Facebook) map_size = ai->reserved_size ?: dyn_size; 212840064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 212961ace7faSTejun Heo 2130edcb4639STejun Heo /* init dynamic chunk if necessary */ 2131b9c39442SDennis Zhou (Facebook) if (ai->reserved_size) { 21320c4169c3SDennis Zhou (Facebook) pcpu_reserved_chunk = chunk; 2133b9c39442SDennis Zhou (Facebook) 2134d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size + 2135c0ebfdc3SDennis Zhou (Facebook) ai->reserved_size; 2136d2f3c384SDennis Zhou (Facebook) map_size = dyn_size; 213740064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2138edcb4639STejun Heo } 2139edcb4639STejun Heo 21402441d15cSTejun Heo /* link the first chunk in */ 21410c4169c3SDennis Zhou (Facebook) pcpu_first_chunk = chunk; 21420cecf50cSDennis Zhou (Facebook) pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2143ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 2144fbf59bc9STejun Heo 214530a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 2146df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 214730a5b536SDennis Zhou 2148fbf59bc9STejun Heo /* we're done */ 2149bba174f5STejun Heo pcpu_base_addr = base_addr; 2150fb435d52STejun Heo return 0; 2151fbf59bc9STejun Heo } 215266c3a757STejun Heo 2153bbddff05STejun Heo #ifdef CONFIG_SMP 2154bbddff05STejun Heo 215517f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2156f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 2157f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 2158f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 2159f58dc01bSTejun Heo }; 216066c3a757STejun Heo 2161f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2162f58dc01bSTejun Heo 2163f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 216466c3a757STejun Heo { 21655479c78aSCyrill Gorcunov if (!str) 21665479c78aSCyrill Gorcunov return -EINVAL; 21675479c78aSCyrill Gorcunov 2168f58dc01bSTejun Heo if (0) 2169f58dc01bSTejun Heo /* nada */; 2170f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2171f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 2172f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 2173f58dc01bSTejun Heo #endif 2174f58dc01bSTejun Heo #ifdef 
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2175f58dc01bSTejun Heo else if (!strcmp(str, "page")) 2176f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 2177f58dc01bSTejun Heo #endif 2178f58dc01bSTejun Heo else 2179870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 218066c3a757STejun Heo 2181f58dc01bSTejun Heo return 0; 218266c3a757STejun Heo } 2183f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 218466c3a757STejun Heo 21853c9a024fSTejun Heo /* 21863c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 21873c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 21883c9a024fSTejun Heo * to be used. 21893c9a024fSTejun Heo */ 219008fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 219108fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 21923c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 21933c9a024fSTejun Heo #endif 21943c9a024fSTejun Heo 21953c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 21963c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 21973c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 21983c9a024fSTejun Heo #endif 21993c9a024fSTejun Heo 22003c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 22013c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 22023c9a024fSTejun Heo /** 2203fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2204fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2205fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2206fbf59bc9STejun Heo * @atom_size: allocation atom size 2207fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2208fbf59bc9STejun Heo * 2209fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 2210fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 2211fbf59bc9STejun Heo * atom size and distances between CPUs. 2212fbf59bc9STejun Heo * 2213bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 2214fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 2215fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 2216fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 2217fbf59bc9STejun Heo * of allocated virtual address space. 2218fbf59bc9STejun Heo * 2219fbf59bc9STejun Heo * RETURNS: 2220fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 2221fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
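 *
 * Illustrative walk-through (assumed numbers): on a hypothetical
 * two-node machine with 4 possible CPUs on node 0 and 2 on node 1,
 * atom_size = 2M and a size_sum that rounds min_unit_size up to 256K,
 * alloc_size becomes 2M and max_upa 8.  upa = 8 would leave 10 of the
 * 16 allocated units unused, more than a third of the possible CPUs,
 * so it is rejected; upa = 4 wastes only 2 units and is kept, giving
 * unit_size = 512K, one 2M allocation per node and two padding units
 * in node 1's group.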
2222fbf59bc9STejun Heo */ 2223fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 2224fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 2225fbf59bc9STejun Heo size_t atom_size, 2226fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2227fbf59bc9STejun Heo { 2228fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 2229fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 2230fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 2231fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 2232fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 2233fbf59bc9STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 2234fbf59bc9STejun Heo int last_allocs, group, unit; 2235fbf59bc9STejun Heo unsigned int cpu, tcpu; 2236fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 2237fbf59bc9STejun Heo unsigned int *cpu_map; 2238fbf59bc9STejun Heo 2239fbf59bc9STejun Heo /* this function may be called multiple times */ 2240fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 2241fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 2242fbf59bc9STejun Heo 2243fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2244fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 2245fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2246fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 2247fbf59bc9STejun Heo 2248fbf59bc9STejun Heo /* 2249fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 2250fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 225125985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 2252fbf59bc9STejun Heo * or larger than min_unit_size. 2253fbf59bc9STejun Heo */ 2254fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2255fbf59bc9STejun Heo 22569c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 2257fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 2258fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 2259f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2260fbf59bc9STejun Heo upa--; 2261fbf59bc9STejun Heo max_upa = upa; 2262fbf59bc9STejun Heo 2263fbf59bc9STejun Heo /* group cpus according to their proximity */ 2264fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 2265fbf59bc9STejun Heo group = 0; 2266fbf59bc9STejun Heo next_group: 2267fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 2268fbf59bc9STejun Heo if (cpu == tcpu) 2269fbf59bc9STejun Heo break; 2270fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 2271fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 2272fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 2273fbf59bc9STejun Heo group++; 2274fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 2275fbf59bc9STejun Heo goto next_group; 2276fbf59bc9STejun Heo } 2277fbf59bc9STejun Heo } 2278fbf59bc9STejun Heo group_map[cpu] = group; 2279fbf59bc9STejun Heo group_cnt[group]++; 2280fbf59bc9STejun Heo } 2281fbf59bc9STejun Heo 2282fbf59bc9STejun Heo /* 22839c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 22849c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 
22859c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 2286fbf59bc9STejun Heo */ 2287fbf59bc9STejun Heo last_allocs = INT_MAX; 2288fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 2289fbf59bc9STejun Heo int allocs = 0, wasted = 0; 2290fbf59bc9STejun Heo 2291f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2292fbf59bc9STejun Heo continue; 2293fbf59bc9STejun Heo 2294fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2295fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2296fbf59bc9STejun Heo allocs += this_allocs; 2297fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 2298fbf59bc9STejun Heo } 2299fbf59bc9STejun Heo 2300fbf59bc9STejun Heo /* 2301fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 2302fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 2303fbf59bc9STejun Heo * passes the following check. 2304fbf59bc9STejun Heo */ 2305fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 2306fbf59bc9STejun Heo continue; 2307fbf59bc9STejun Heo 2308fbf59bc9STejun Heo /* and then don't consume more memory */ 2309fbf59bc9STejun Heo if (allocs > last_allocs) 2310fbf59bc9STejun Heo break; 2311fbf59bc9STejun Heo last_allocs = allocs; 2312fbf59bc9STejun Heo best_upa = upa; 2313fbf59bc9STejun Heo } 2314fbf59bc9STejun Heo upa = best_upa; 2315fbf59bc9STejun Heo 2316fbf59bc9STejun Heo /* allocate and fill alloc_info */ 2317fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 2318fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 2319fbf59bc9STejun Heo 2320fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2321fbf59bc9STejun Heo if (!ai) 2322fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 2323fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 2324fbf59bc9STejun Heo 2325fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2326fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 2327fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2328fbf59bc9STejun Heo } 2329fbf59bc9STejun Heo 2330fbf59bc9STejun Heo ai->static_size = static_size; 2331fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2332fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2333fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2334fbf59bc9STejun Heo ai->atom_size = atom_size; 2335fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2336fbf59bc9STejun Heo 2337fbf59bc9STejun Heo for (group = 0, unit = 0; group_cnt[group]; group++) { 2338fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2339fbf59bc9STejun Heo 2340fbf59bc9STejun Heo /* 2341fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2342fbf59bc9STejun Heo * back-to-back. The caller should update this to 2343fbf59bc9STejun Heo * reflect actual allocation. 
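 *
 * E.g. (illustrative numbers) with unit_size = 512K and a first
 * group of 4 units, the second group provisionally starts at
 * base_offset = 2M; pcpu_embed_first_chunk() later replaces these
 * offsets with the real distances between its group allocations.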
2344fbf59bc9STejun Heo */ 2345fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 2346fbf59bc9STejun Heo 2347fbf59bc9STejun Heo for_each_possible_cpu(cpu) 2348fbf59bc9STejun Heo if (group_map[cpu] == group) 2349fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 2350fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 2351fbf59bc9STejun Heo unit += gi->nr_units; 2352fbf59bc9STejun Heo } 2353fbf59bc9STejun Heo BUG_ON(unit != nr_units); 2354fbf59bc9STejun Heo 2355fbf59bc9STejun Heo return ai; 2356fbf59bc9STejun Heo } 23573c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2358fbf59bc9STejun Heo 23593c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 236066c3a757STejun Heo /** 236166c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 236266c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 23634ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2364c8826dd5STejun Heo * @atom_size: allocation atom size 2365c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2366c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 236725985edcSLucas De Marchi * @free_fn: function to free percpu page 236866c3a757STejun Heo * 236966c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 237066c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 237166c3a757STejun Heo * 237266c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 2373c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 2374c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 2375c8826dd5STejun Heo * aligned to @atom_size. 2376c8826dd5STejun Heo * 2377c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 2378c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 2379c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 2380c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 2381c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 2382c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 238366c3a757STejun Heo * 23844ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 238566c3a757STejun Heo * 238666c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 2387c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 238866c3a757STejun Heo * 238966c3a757STejun Heo * RETURNS: 2390fb435d52STejun Heo * 0 on success, -errno on failure. 
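 *
 * Illustrative example (assumed numbers): with unit_size = 512K, a
 * populated size_sum of 300K and a group of 4 units, the whole group
 * is backed by a single allocation from @alloc_fn; each used unit
 * hands its trailing 212K (unit_size - size_sum) back through
 * @free_fn, and any unit whose cpu_map slot is NR_CPUS is returned
 * whole.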
239166c3a757STejun Heo */ 23924ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 2393c8826dd5STejun Heo size_t atom_size, 2394c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 2395c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2396c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 239766c3a757STejun Heo { 2398c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 2399c8826dd5STejun Heo void **areas = NULL; 2400fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 240193c76b6bSzijun_hu size_t size_sum, areas_size; 240293c76b6bSzijun_hu unsigned long max_distance; 24039b739662Szijun_hu int group, i, highest_group, rc; 240466c3a757STejun Heo 2405c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 2406c8826dd5STejun Heo cpu_distance_fn); 2407fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2408fd1e8a1fSTejun Heo return PTR_ERR(ai); 240966c3a757STejun Heo 2410fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2411c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 241266c3a757STejun Heo 2413999c17e3SSantosh Shilimkar areas = memblock_virt_alloc_nopanic(areas_size, 0); 2414c8826dd5STejun Heo if (!areas) { 2415fb435d52STejun Heo rc = -ENOMEM; 2416c8826dd5STejun Heo goto out_free; 2417fa8a7094STejun Heo } 241866c3a757STejun Heo 24199b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 24209b739662Szijun_hu highest_group = 0; 2421c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2422c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2423c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 2424c8826dd5STejun Heo void *ptr; 242566c3a757STejun Heo 2426c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2427c8826dd5STejun Heo cpu = gi->cpu_map[i]; 2428c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 2429c8826dd5STejun Heo 2430c8826dd5STejun Heo /* allocate space for the whole group */ 2431c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2432c8826dd5STejun Heo if (!ptr) { 2433c8826dd5STejun Heo rc = -ENOMEM; 2434c8826dd5STejun Heo goto out_free_areas; 2435c8826dd5STejun Heo } 2436f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2437f528f0b8SCatalin Marinas kmemleak_free(ptr); 2438c8826dd5STejun Heo areas[group] = ptr; 2439c8826dd5STejun Heo 2440c8826dd5STejun Heo base = min(ptr, base); 24419b739662Szijun_hu if (ptr > areas[highest_group]) 24429b739662Szijun_hu highest_group = group; 24439b739662Szijun_hu } 24449b739662Szijun_hu max_distance = areas[highest_group] - base; 24459b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 24469b739662Szijun_hu 24479b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 24489b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 24499b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 24509b739662Szijun_hu max_distance, VMALLOC_TOTAL); 24519b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 24529b739662Szijun_hu /* and fail if we have fallback */ 24539b739662Szijun_hu rc = -EINVAL; 24549b739662Szijun_hu goto out_free_areas; 24559b739662Szijun_hu #endif 245642b64281STejun Heo } 245742b64281STejun Heo 245842b64281STejun Heo /* 245942b64281STejun Heo * Copy data and free unused parts. 
This should happen after all 246042b64281STejun Heo * allocations are complete; otherwise, we may end up with 246142b64281STejun Heo * overlapping groups. 246242b64281STejun Heo */ 246342b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 246442b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 246542b64281STejun Heo void *ptr = areas[group]; 2466c8826dd5STejun Heo 2467c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2468c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 2469c8826dd5STejun Heo /* unused unit, free whole */ 2470c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 2471c8826dd5STejun Heo continue; 2472c8826dd5STejun Heo } 2473c8826dd5STejun Heo /* copy and return the unused part */ 2474fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 2475c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 2476c8826dd5STejun Heo } 247766c3a757STejun Heo } 247866c3a757STejun Heo 2479c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 24806ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2481c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 24826ea529a2STejun Heo } 2483c8826dd5STejun Heo 2484870d4b12SJoe Perches pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 2485fd1e8a1fSTejun Heo PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 2486fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 248766c3a757STejun Heo 2488fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, base); 2489c8826dd5STejun Heo goto out_free; 2490c8826dd5STejun Heo 2491c8826dd5STejun Heo out_free_areas: 2492c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2493f851c8d8SMichael Holzheu if (areas[group]) 2494c8826dd5STejun Heo free_fn(areas[group], 2495c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2496c8826dd5STejun Heo out_free: 2497fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2498c8826dd5STejun Heo if (areas) 2499999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 2500fb435d52STejun Heo return rc; 2501d4b95f80STejun Heo } 25023c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 2503d4b95f80STejun Heo 25043c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 2505d4b95f80STejun Heo /** 250600ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2507d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2508d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 250925985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 2510d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2511d4b95f80STejun Heo * 251200ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 251300ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2514d4b95f80STejun Heo * 2515d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2516d4b95f80STejun Heo * page-by-page into vmalloc area. 2517d4b95f80STejun Heo * 2518d4b95f80STejun Heo * RETURNS: 2519fb435d52STejun Heo * 0 on success, -errno on failure. 
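 *
 * Illustrative example (assumed numbers): with a 32K unit_size and 4K
 * pages, every possible CPU gets eight separate page allocations from
 * @alloc_fn; the pages are then mapped back-to-back into a vmalloc
 * area of num_possible_cpus() * unit_size bytes and the static data
 * is copied into each unit.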
2520d4b95f80STejun Heo */ 2521fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2522d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2523d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2524d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2525d4b95f80STejun Heo { 25268f05a6a6STejun Heo static struct vm_struct vm; 2527fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 252800ae4064STejun Heo char psize_str[16]; 2529ce3141a2STejun Heo int unit_pages; 2530d4b95f80STejun Heo size_t pages_size; 2531ce3141a2STejun Heo struct page **pages; 2532fb435d52STejun Heo int unit, i, j, rc; 25338f606604Szijun_hu int upa; 25348f606604Szijun_hu int nr_g0_units; 2535d4b95f80STejun Heo 253600ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 253700ae4064STejun Heo 25384ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2539fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2540fd1e8a1fSTejun Heo return PTR_ERR(ai); 2541fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 25428f606604Szijun_hu upa = ai->alloc_size/ai->unit_size; 25438f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa); 25448f606604Szijun_hu if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) { 25458f606604Szijun_hu pcpu_free_alloc_info(ai); 25468f606604Szijun_hu return -EINVAL; 25478f606604Szijun_hu } 2548fd1e8a1fSTejun Heo 2549fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 2550d4b95f80STejun Heo 2551d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 2552fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2553fd1e8a1fSTejun Heo sizeof(pages[0])); 2554999c17e3SSantosh Shilimkar pages = memblock_virt_alloc(pages_size, 0); 2555d4b95f80STejun Heo 25568f05a6a6STejun Heo /* allocate pages */ 2557d4b95f80STejun Heo j = 0; 25588f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) { 2559fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 25608f606604Szijun_hu for (i = 0; i < unit_pages; i++) { 2561d4b95f80STejun Heo void *ptr; 2562d4b95f80STejun Heo 25633cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2564d4b95f80STejun Heo if (!ptr) { 2565870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n", 2566598d8091SJoe Perches psize_str, cpu); 2567d4b95f80STejun Heo goto enomem; 2568d4b95f80STejun Heo } 2569f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2570f528f0b8SCatalin Marinas kmemleak_free(ptr); 2571ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 2572d4b95f80STejun Heo } 25738f606604Szijun_hu } 2574d4b95f80STejun Heo 25758f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 25768f05a6a6STejun Heo vm.flags = VM_ALLOC; 2577fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 25788f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 25798f05a6a6STejun Heo 2580fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 25811d9d3257STejun Heo unsigned long unit_addr = 2582fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 25838f05a6a6STejun Heo 2584ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 25858f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 25868f05a6a6STejun Heo 25878f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 2588fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2589ce3141a2STejun Heo unit_pages); 2590fb435d52STejun Heo if (rc < 0) 
2591fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 25928f05a6a6STejun Heo 25938f05a6a6STejun Heo /* 25948f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 25958f05a6a6STejun Heo * cache for the linear mapping here - something 25968f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 25978f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 25988f05a6a6STejun Heo * data structures are not set up yet. 25998f05a6a6STejun Heo */ 26008f05a6a6STejun Heo 26018f05a6a6STejun Heo /* copy static data */ 2602fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 260366c3a757STejun Heo } 260466c3a757STejun Heo 260566c3a757STejun Heo /* we're ready, commit */ 2606870d4b12SJoe Perches pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n", 2607fd1e8a1fSTejun Heo unit_pages, psize_str, vm.addr, ai->static_size, 2608fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 260966c3a757STejun Heo 2610fb435d52STejun Heo rc = pcpu_setup_first_chunk(ai, vm.addr); 2611d4b95f80STejun Heo goto out_free_ar; 2612d4b95f80STejun Heo 2613d4b95f80STejun Heo enomem: 2614d4b95f80STejun Heo while (--j >= 0) 2615ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 2616fb435d52STejun Heo rc = -ENOMEM; 2617d4b95f80STejun Heo out_free_ar: 2618999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 2619fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2620fb435d52STejun Heo return rc; 262166c3a757STejun Heo } 26223c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 2623d4b95f80STejun Heo 2624bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 26258c4bfc6eSTejun Heo /* 2626bbddff05STejun Heo * Generic SMP percpu area setup. 2627e74e3962STejun Heo * 2628e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 2629e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 2630e74e3962STejun Heo * important because many archs have addressing restrictions and might 2631e74e3962STejun Heo * fail if the percpu area is located far away from the previous 2632e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 2633e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 2634e74e3962STejun Heo * on the physical linear memory mapping which uses large page 2635e74e3962STejun Heo * mappings on applicable archs. 2636e74e3962STejun Heo */ 2637e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2638e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 2639e74e3962STejun Heo 2640c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2641c8826dd5STejun Heo size_t align) 2642c8826dd5STejun Heo { 2643999c17e3SSantosh Shilimkar return memblock_virt_alloc_from_nopanic( 2644999c17e3SSantosh Shilimkar size, align, __pa(MAX_DMA_ADDRESS)); 2645c8826dd5STejun Heo } 2646c8826dd5STejun Heo 2647c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2648c8826dd5STejun Heo { 2649999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 2650c8826dd5STejun Heo } 2651c8826dd5STejun Heo 2652e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2653e74e3962STejun Heo { 2654e74e3962STejun Heo unsigned long delta; 2655e74e3962STejun Heo unsigned int cpu; 2656fb435d52STejun Heo int rc; 2657e74e3962STejun Heo 2658e74e3962STejun Heo /* 2659e74e3962STejun Heo * Always reserve area for module percpu variables. 
That's 2660e74e3962STejun Heo * what the legacy allocator did. 2661e74e3962STejun Heo */ 2662fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 2663c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 2664c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 2665fb435d52STejun Heo if (rc < 0) 2666bbddff05STejun Heo panic("Failed to initialize percpu areas."); 2667e74e3962STejun Heo 2668e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2669e74e3962STejun Heo for_each_possible_cpu(cpu) 2670fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 2671e74e3962STejun Heo } 2672e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2673099a19d9STejun Heo 2674bbddff05STejun Heo #else /* CONFIG_SMP */ 2675bbddff05STejun Heo 2676bbddff05STejun Heo /* 2677bbddff05STejun Heo * UP percpu area setup. 2678bbddff05STejun Heo * 2679bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 2680bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 2681bbddff05STejun Heo * variables and don't require any special preparation. 2682bbddff05STejun Heo */ 2683bbddff05STejun Heo void __init setup_per_cpu_areas(void) 2684bbddff05STejun Heo { 2685bbddff05STejun Heo const size_t unit_size = 2686bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 2687bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 2688bbddff05STejun Heo struct pcpu_alloc_info *ai; 2689bbddff05STejun Heo void *fc; 2690bbddff05STejun Heo 2691bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 2692999c17e3SSantosh Shilimkar fc = memblock_virt_alloc_from_nopanic(unit_size, 2693999c17e3SSantosh Shilimkar PAGE_SIZE, 2694999c17e3SSantosh Shilimkar __pa(MAX_DMA_ADDRESS)); 2695bbddff05STejun Heo if (!ai || !fc) 2696bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 2697100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2698100d13c3SCatalin Marinas kmemleak_free(fc); 2699bbddff05STejun Heo 2700bbddff05STejun Heo ai->dyn_size = unit_size; 2701bbddff05STejun Heo ai->unit_size = unit_size; 2702bbddff05STejun Heo ai->atom_size = unit_size; 2703bbddff05STejun Heo ai->alloc_size = unit_size; 2704bbddff05STejun Heo ai->groups[0].nr_units = 1; 2705bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 2706bbddff05STejun Heo 2707bbddff05STejun Heo if (pcpu_setup_first_chunk(ai, fc) < 0) 2708bbddff05STejun Heo panic("Failed to initialize percpu areas."); 2709bbddff05STejun Heo } 2710bbddff05STejun Heo 2711bbddff05STejun Heo #endif /* CONFIG_SMP */ 2712bbddff05STejun Heo 2713099a19d9STejun Heo /* 27141a4d7607STejun Heo * Percpu allocator is initialized early during boot when neither slab or 27151a4d7607STejun Heo * workqueue is available. Plug async management until everything is up 27161a4d7607STejun Heo * and running. 27171a4d7607STejun Heo */ 27181a4d7607STejun Heo static int __init percpu_enable_async(void) 27191a4d7607STejun Heo { 27201a4d7607STejun Heo pcpu_async_enabled = true; 27211a4d7607STejun Heo return 0; 27221a4d7607STejun Heo } 27231a4d7607STejun Heo subsys_initcall(percpu_enable_async); 2724
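/*
 * Illustrative sketch (not part of the original file): how an arch
 * selecting CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK might wire up the
 * page-based first chunk, mirroring the generic embed-based
 * setup_per_cpu_areas() above.  arch_populate_pcpu_pte() is a
 * hypothetical helper standing in for that arch's pte population
 * code; real ports supply their own allocation callbacks as well.
 */
#if 0	/* example only, never compiled */
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/* reserve room for module static percpu variables, as above */
	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
				   pcpu_dfl_fc_alloc, pcpu_dfl_fc_free,
				   arch_populate_pcpu_pte /* hypothetical */);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	/* same offset fixup the generic SMP path performs */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif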