// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * tries to allocate from the fullest chunk first.  Each chunk is managed
 * by a bitmap with metadata blocks.  The allocation map is updated on
 * every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.
 * All hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
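/*
 * Illustration only (not part of the kernel interface): with the default
 * mapping, a percpu pointer and its address in the first chunk differ by a
 * constant offset, so the two conversions are exact inverses of each other:
 *
 *	addr == __pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr))
 */
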
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

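/*
 * Worked example (a sketch of the flow, see pcpu_alloc() and
 * pcpu_balance_workfn() later in this file): if an allocation leaves fewer
 * than PCPU_EMPTY_POP_PAGES_LOW (2) empty populated pages, the balance work
 * is scheduled and the worker populates pages until roughly
 * PCPU_EMPTY_POP_PAGES_HIGH (4) are available again, keeping a small reserve
 * for atomic allocations to draw from.
 */
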
/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

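/*
 * Worked example of the byte/bit/block arithmetic used by the helpers below
 * (a sketch assuming 4K pages, so PCPU_BITMAP_BLOCK_BITS == 1024, and
 * PCPU_MIN_ALLOC_SIZE == 4): byte offset 6192 into a chunk is bit offset
 * 6192 / 4 == 1548, which falls in md block 1548 / 1024 == 1 at block
 * offset 1548 % 1024 == 524.
 */
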
/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

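/*
 * Worked example for pcpu_next_hint() above (hypothetical numbers): with
 * first_free == 5, a 3-bit scan_hint at scan_hint_start == 20, and the
 * contig_hint starting at 100, a request for 8 bits (> scan_hint) starts
 * scanning at 20 + 3 == 23, while a request for 2 bits starts at
 * first_free == 5.
 */
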
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

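/*
 * Rough worked example for the cross-block aggregation above (hypothetical
 * block states, PCPU_BITMAP_BLOCK_BITS == 1024): if block i ends with
 * right_free == 100, block i + 1 is completely free, and block i + 2 starts
 * with left_free == 50, the iterator reports one free region of
 * 100 + 1024 + 50 == 1174 bits beginning 100 bits before the end of block i.
 */
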
/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				       \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			       \
	     (bit_off) += (bits),					       \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

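/*
 * Usage note (an example from elsewhere in this allocator, see
 * pcpu_alloc_chunk() later in this file): a chunk's alloc_map, which needs
 * BITS_TO_LONGS(region_bits) * sizeof(long) bytes, is obtained through
 * pcpu_mem_zalloc(), so small bitmaps come from the slab and large ones
 * fall back to vmalloc space.
 */
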
/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}

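/*
 * For example (see the hint update helpers below): when a free turns an
 * entire md_block (one page) free, pcpu_block_update_hint_free() passes a
 * positive delta here, while pcpu_block_update_hint_alloc() passes a
 * negative delta when an allocation consumes a previously empty page.
 */
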
/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

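/*
 * Worked example for pcpu_block_update() above (hypothetical hints, assuming
 * no scan_hint was previously set): if a block's contig_hint is 50 bits at
 * offset 100 and an 80-bit free area is reported at offset 300, the old hint
 * is demoted to the scan_hint (scan_hint_start == 100, scan_hint == 50) and
 * [300, 380) becomes the new contig_hint.
 */
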
/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits)
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	unsigned int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	bitmap_for_each_clear_region(alloc_map, rs, re, start,
				     PCPU_BITMAP_BLOCK_BITS)
		pcpu_block_update(block, rs, re);
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocation */
	int s_off, e_off;	/* block offsets of the allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

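/*
 * Both hint update helpers split a request into per block offsets the same
 * way.  For example (assuming 4K pages, so PCPU_BITMAP_BLOCK_BITS == 1024),
 * a 300-bit area at bit_off 900 spans s_index 0 with s_off 900 and e_index 1
 * with e_off (1199 & 1023) + 1 == 176.
 */
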
/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* freeing in the same block */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
991b185cd0dSDennis Zhou (Facebook) */ 992b239f7daSDennis Zhou if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index) 993d33d9f3dSDennis Zhou pcpu_chunk_refresh_hint(chunk, true); 994b185cd0dSDennis Zhou (Facebook) else 99592c14cabSDennis Zhou pcpu_block_update(&chunk->chunk_md, 99692c14cabSDennis Zhou pcpu_block_off_to_off(s_index, start), 99792c14cabSDennis Zhou end); 998ca460b3cSDennis Zhou (Facebook) } 999ca460b3cSDennis Zhou (Facebook) 1000ca460b3cSDennis Zhou (Facebook) /** 100140064aecSDennis Zhou (Facebook) * pcpu_is_populated - determines if the region is populated 100240064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 100340064aecSDennis Zhou (Facebook) * @bit_off: chunk offset 100440064aecSDennis Zhou (Facebook) * @bits: size of area 100540064aecSDennis Zhou (Facebook) * @next_off: return value for the next offset to start searching 100640064aecSDennis Zhou (Facebook) * 100740064aecSDennis Zhou (Facebook) * For atomic allocations, check if the backing pages are populated. 100840064aecSDennis Zhou (Facebook) * 100940064aecSDennis Zhou (Facebook) * RETURNS: 101040064aecSDennis Zhou (Facebook) * Bool if the backing pages are populated. 101140064aecSDennis Zhou (Facebook) * next_index is to skip over unpopulated blocks in pcpu_find_block_fit. 101240064aecSDennis Zhou (Facebook) */ 101340064aecSDennis Zhou (Facebook) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, 101440064aecSDennis Zhou (Facebook) int *next_off) 101540064aecSDennis Zhou (Facebook) { 1016e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re; 101740064aecSDennis Zhou (Facebook) 101840064aecSDennis Zhou (Facebook) page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); 101940064aecSDennis Zhou (Facebook) page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 102040064aecSDennis Zhou (Facebook) 102140064aecSDennis Zhou (Facebook) rs = page_start; 1022e837dfdeSDennis Zhou bitmap_next_clear_region(chunk->populated, &rs, &re, page_end); 102340064aecSDennis Zhou (Facebook) if (rs >= page_end) 102440064aecSDennis Zhou (Facebook) return true; 102540064aecSDennis Zhou (Facebook) 102640064aecSDennis Zhou (Facebook) *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; 102740064aecSDennis Zhou (Facebook) return false; 102840064aecSDennis Zhou (Facebook) } 102940064aecSDennis Zhou (Facebook) 103040064aecSDennis Zhou (Facebook) /** 103140064aecSDennis Zhou (Facebook) * pcpu_find_block_fit - finds the block index to start searching 103240064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 103340064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 103440064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE bytes) 103540064aecSDennis Zhou (Facebook) * @pop_only: use populated regions only 103640064aecSDennis Zhou (Facebook) * 1037b4c2116cSDennis Zhou (Facebook) * Given a chunk and an allocation spec, find the offset to begin searching 1038b4c2116cSDennis Zhou (Facebook) * for a free region. This iterates over the bitmap metadata blocks to 1039b4c2116cSDennis Zhou (Facebook) * find an offset that will be guaranteed to fit the requirements. It is 1040b4c2116cSDennis Zhou (Facebook) * not quite first fit as if the allocation does not fit in the contig hint 1041b4c2116cSDennis Zhou (Facebook) * of a block or chunk, it is skipped. This errs on the side of caution 1042b4c2116cSDennis Zhou (Facebook) * to prevent excess iteration. 
Poor alignment can cause the allocator to 1043b4c2116cSDennis Zhou (Facebook) * skip over blocks and chunks that have valid free areas. 1044b4c2116cSDennis Zhou (Facebook) * 104540064aecSDennis Zhou (Facebook) * RETURNS: 104640064aecSDennis Zhou (Facebook) * The offset in the bitmap to begin searching. 104740064aecSDennis Zhou (Facebook) * -1 if no offset is found. 104840064aecSDennis Zhou (Facebook) */ 104940064aecSDennis Zhou (Facebook) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 105040064aecSDennis Zhou (Facebook) size_t align, bool pop_only) 105140064aecSDennis Zhou (Facebook) { 105292c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1053b4c2116cSDennis Zhou (Facebook) int bit_off, bits, next_off; 105440064aecSDennis Zhou (Facebook) 105513f96637SDennis Zhou (Facebook) /* 105613f96637SDennis Zhou (Facebook) * Check to see if the allocation can fit in the chunk's contig hint. 105713f96637SDennis Zhou (Facebook) * This is an optimization to prevent scanning by assuming if it 105813f96637SDennis Zhou (Facebook) * cannot fit in the global hint, there is memory pressure and creating 105913f96637SDennis Zhou (Facebook) * a new chunk would happen soon. 106013f96637SDennis Zhou (Facebook) */ 106192c14cabSDennis Zhou bit_off = ALIGN(chunk_md->contig_hint_start, align) - 106292c14cabSDennis Zhou chunk_md->contig_hint_start; 106392c14cabSDennis Zhou if (bit_off + alloc_bits > chunk_md->contig_hint) 106413f96637SDennis Zhou (Facebook) return -1; 106513f96637SDennis Zhou (Facebook) 1066d33d9f3dSDennis Zhou bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1067b4c2116cSDennis Zhou (Facebook) bits = 0; 1068b4c2116cSDennis Zhou (Facebook) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 106940064aecSDennis Zhou (Facebook) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1070b4c2116cSDennis Zhou (Facebook) &next_off)) 107140064aecSDennis Zhou (Facebook) break; 107240064aecSDennis Zhou (Facebook) 1073b4c2116cSDennis Zhou (Facebook) bit_off = next_off; 107440064aecSDennis Zhou (Facebook) bits = 0; 107540064aecSDennis Zhou (Facebook) } 107640064aecSDennis Zhou (Facebook) 107740064aecSDennis Zhou (Facebook) if (bit_off == pcpu_chunk_map_bits(chunk)) 107840064aecSDennis Zhou (Facebook) return -1; 107940064aecSDennis Zhou (Facebook) 108040064aecSDennis Zhou (Facebook) return bit_off; 108140064aecSDennis Zhou (Facebook) } 108240064aecSDennis Zhou (Facebook) 1083b89462a9SDennis Zhou /* 1084b89462a9SDennis Zhou * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1085b89462a9SDennis Zhou * @map: the address to base the search on 1086b89462a9SDennis Zhou * @size: the bitmap size in bits 1087b89462a9SDennis Zhou * @start: the bitnumber to start searching at 1088b89462a9SDennis Zhou * @nr: the number of zeroed bits we're looking for 1089b89462a9SDennis Zhou * @align_mask: alignment mask for zero area 1090b89462a9SDennis Zhou * @largest_off: offset of the largest area skipped 1091b89462a9SDennis Zhou * @largest_bits: size of the largest area skipped 1092b89462a9SDennis Zhou * 1093b89462a9SDennis Zhou * The @align_mask should be one less than a power of 2. 1094b89462a9SDennis Zhou * 1095b89462a9SDennis Zhou * This is a modified version of bitmap_find_next_zero_area_off() to remember 1096b89462a9SDennis Zhou * the largest area that was skipped. This is imperfect, but in general is 1097b89462a9SDennis Zhou * good enough. The largest remembered region is the largest failed region 1098b89462a9SDennis Zhou * seen. 
This does not include anything we possibly skipped due to alignment. 1099b89462a9SDennis Zhou * pcpu_block_update_scan() does scan backwards to try and recover what was 1100b89462a9SDennis Zhou * lost to alignment. While this can cause scanning to miss earlier possible 1101b89462a9SDennis Zhou * free areas, smaller allocations will eventually fill those holes. 1102b89462a9SDennis Zhou */ 1103b89462a9SDennis Zhou static unsigned long pcpu_find_zero_area(unsigned long *map, 1104b89462a9SDennis Zhou unsigned long size, 1105b89462a9SDennis Zhou unsigned long start, 1106b89462a9SDennis Zhou unsigned long nr, 1107b89462a9SDennis Zhou unsigned long align_mask, 1108b89462a9SDennis Zhou unsigned long *largest_off, 1109b89462a9SDennis Zhou unsigned long *largest_bits) 1110b89462a9SDennis Zhou { 1111b89462a9SDennis Zhou unsigned long index, end, i, area_off, area_bits; 1112b89462a9SDennis Zhou again: 1113b89462a9SDennis Zhou index = find_next_zero_bit(map, size, start); 1114b89462a9SDennis Zhou 1115b89462a9SDennis Zhou /* Align allocation */ 1116b89462a9SDennis Zhou index = __ALIGN_MASK(index, align_mask); 1117b89462a9SDennis Zhou area_off = index; 1118b89462a9SDennis Zhou 1119b89462a9SDennis Zhou end = index + nr; 1120b89462a9SDennis Zhou if (end > size) 1121b89462a9SDennis Zhou return end; 1122b89462a9SDennis Zhou i = find_next_bit(map, end, index); 1123b89462a9SDennis Zhou if (i < end) { 1124b89462a9SDennis Zhou area_bits = i - area_off; 1125b89462a9SDennis Zhou /* remember largest unused area with best alignment */ 1126b89462a9SDennis Zhou if (area_bits > *largest_bits || 1127b89462a9SDennis Zhou (area_bits == *largest_bits && *largest_off && 1128b89462a9SDennis Zhou (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1129b89462a9SDennis Zhou *largest_off = area_off; 1130b89462a9SDennis Zhou *largest_bits = area_bits; 1131b89462a9SDennis Zhou } 1132b89462a9SDennis Zhou 1133b89462a9SDennis Zhou start = i + 1; 1134b89462a9SDennis Zhou goto again; 1135b89462a9SDennis Zhou } 1136b89462a9SDennis Zhou return index; 1137b89462a9SDennis Zhou } 1138b89462a9SDennis Zhou 113940064aecSDennis Zhou (Facebook) /** 114040064aecSDennis Zhou (Facebook) * pcpu_alloc_area - allocates an area from a pcpu_chunk 114140064aecSDennis Zhou (Facebook) * @chunk: chunk of interest 114240064aecSDennis Zhou (Facebook) * @alloc_bits: size of request in allocation units 114340064aecSDennis Zhou (Facebook) * @align: alignment of area (max PAGE_SIZE) 114440064aecSDennis Zhou (Facebook) * @start: bit_off to start searching 114540064aecSDennis Zhou (Facebook) * 114640064aecSDennis Zhou (Facebook) * This function takes in a @start offset to begin searching to fit an 1147b4c2116cSDennis Zhou (Facebook) * allocation of @alloc_bits with alignment @align. It needs to scan 1148b4c2116cSDennis Zhou (Facebook) * the allocation map because if it fits within the block's contig hint, 1149b4c2116cSDennis Zhou (Facebook) * @start will be block->first_free. This is an attempt to fill the 1150b4c2116cSDennis Zhou (Facebook) * allocation prior to breaking the contig hint. The allocation and 1151b4c2116cSDennis Zhou (Facebook) * boundary maps are updated accordingly if it confirms a valid 1152b4c2116cSDennis Zhou (Facebook) * free area. 115340064aecSDennis Zhou (Facebook) * 115440064aecSDennis Zhou (Facebook) * RETURNS: 115540064aecSDennis Zhou (Facebook) * Allocated addr offset in @chunk on success. 115640064aecSDennis Zhou (Facebook) * -1 if no matching area is found. 
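* Note (an illustrative aside, not part of the original comment): the scan of the allocation map is bounded to @start + @alloc_bits + PCPU_BITMAP_BLOCK_BITS bits, or the end of the chunk if that comes first, so a small request scans at most roughly one block's worth of bits past @start (typically 1024 bits per block with 4 KiB pages) rather than the whole chunk.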
115740064aecSDennis Zhou (Facebook) */ 115840064aecSDennis Zhou (Facebook) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 115940064aecSDennis Zhou (Facebook) size_t align, int start) 116040064aecSDennis Zhou (Facebook) { 116192c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 116240064aecSDennis Zhou (Facebook) size_t align_mask = (align) ? (align - 1) : 0; 1163b89462a9SDennis Zhou unsigned long area_off = 0, area_bits = 0; 116440064aecSDennis Zhou (Facebook) int bit_off, end, oslot; 11659f7dcf22STejun Heo 11664f996e23STejun Heo lockdep_assert_held(&pcpu_lock); 11674f996e23STejun Heo 116840064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1169833af842STejun Heo 1170833af842STejun Heo /* 117140064aecSDennis Zhou (Facebook) * Search to find a fit. 1172833af842STejun Heo */ 11738c43004aSDennis Zhou end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 11748c43004aSDennis Zhou pcpu_chunk_map_bits(chunk)); 1175b89462a9SDennis Zhou bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1176b89462a9SDennis Zhou align_mask, &area_off, &area_bits); 117740064aecSDennis Zhou (Facebook) if (bit_off >= end) 1178a16037c8STejun Heo return -1; 1179a16037c8STejun Heo 1180b89462a9SDennis Zhou if (area_bits) 1181b89462a9SDennis Zhou pcpu_block_update_scan(chunk, area_off, area_bits); 1182b89462a9SDennis Zhou 118340064aecSDennis Zhou (Facebook) /* update alloc map */ 118440064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1185a16037c8STejun Heo 118640064aecSDennis Zhou (Facebook) /* update boundary map */ 118740064aecSDennis Zhou (Facebook) set_bit(bit_off, chunk->bound_map); 118840064aecSDennis Zhou (Facebook) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 118940064aecSDennis Zhou (Facebook) set_bit(bit_off + alloc_bits, chunk->bound_map); 1190a16037c8STejun Heo 119140064aecSDennis Zhou (Facebook) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 119240064aecSDennis Zhou (Facebook) 119386b442fbSDennis Zhou (Facebook) /* update first free bit */ 119492c14cabSDennis Zhou if (bit_off == chunk_md->first_free) 119592c14cabSDennis Zhou chunk_md->first_free = find_next_zero_bit( 119686b442fbSDennis Zhou (Facebook) chunk->alloc_map, 119786b442fbSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk), 119886b442fbSDennis Zhou (Facebook) bit_off + alloc_bits); 119986b442fbSDennis Zhou (Facebook) 1200ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 120140064aecSDennis Zhou (Facebook) 120240064aecSDennis Zhou (Facebook) pcpu_chunk_relocate(chunk, oslot); 120340064aecSDennis Zhou (Facebook) 120440064aecSDennis Zhou (Facebook) return bit_off * PCPU_MIN_ALLOC_SIZE; 1205a16037c8STejun Heo } 1206a16037c8STejun Heo 1207a16037c8STejun Heo /** 120840064aecSDennis Zhou (Facebook) * pcpu_free_area - frees the corresponding offset 1209fbf59bc9STejun Heo * @chunk: chunk of interest 121040064aecSDennis Zhou (Facebook) * @off: addr offset into chunk 1211fbf59bc9STejun Heo * 121240064aecSDennis Zhou (Facebook) * This function determines the size of an allocation to free using 121340064aecSDennis Zhou (Facebook) * the boundary bitmap and clears the allocation map. 
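* For example (an illustrative case, not part of the original comment): an allocation of 3 bits that began at bit 8 left bound_map bits 8 and 11 set; freeing at the matching offset computes bit_off = 8, finds the next bound bit at 11, and therefore clears alloc_map bits 8..10 (bits = 3).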
1214fbf59bc9STejun Heo */ 121540064aecSDennis Zhou (Facebook) static void pcpu_free_area(struct pcpu_chunk *chunk, int off) 1216fbf59bc9STejun Heo { 121792c14cabSDennis Zhou struct pcpu_block_md *chunk_md = &chunk->chunk_md; 121840064aecSDennis Zhou (Facebook) int bit_off, bits, end, oslot; 1219fbf59bc9STejun Heo 12205ccd30e4SDennis Zhou lockdep_assert_held(&pcpu_lock); 122130a5b536SDennis Zhou pcpu_stats_area_dealloc(chunk); 12225ccd30e4SDennis Zhou 122340064aecSDennis Zhou (Facebook) oslot = pcpu_chunk_slot(chunk); 1224723ad1d9SAl Viro 122540064aecSDennis Zhou (Facebook) bit_off = off / PCPU_MIN_ALLOC_SIZE; 1226fbf59bc9STejun Heo 122740064aecSDennis Zhou (Facebook) /* find end index */ 122840064aecSDennis Zhou (Facebook) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 122940064aecSDennis Zhou (Facebook) bit_off + 1); 123040064aecSDennis Zhou (Facebook) bits = end - bit_off; 123140064aecSDennis Zhou (Facebook) bitmap_clear(chunk->alloc_map, bit_off, bits); 12323d331ad7SAl Viro 123340064aecSDennis Zhou (Facebook) /* update metadata */ 123440064aecSDennis Zhou (Facebook) chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE; 1235fbf59bc9STejun Heo 123686b442fbSDennis Zhou (Facebook) /* update first free bit */ 123792c14cabSDennis Zhou chunk_md->first_free = min(chunk_md->first_free, bit_off); 123886b442fbSDennis Zhou (Facebook) 1239ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_free(chunk, bit_off, bits); 1240b539b87fSTejun Heo 1241fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, oslot); 1242fbf59bc9STejun Heo } 1243fbf59bc9STejun Heo 1244047924c9SDennis Zhou static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1245047924c9SDennis Zhou { 1246047924c9SDennis Zhou block->scan_hint = 0; 1247047924c9SDennis Zhou block->contig_hint = nr_bits; 1248047924c9SDennis Zhou block->left_free = nr_bits; 1249047924c9SDennis Zhou block->right_free = nr_bits; 1250047924c9SDennis Zhou block->first_free = 0; 1251047924c9SDennis Zhou block->nr_bits = nr_bits; 1252047924c9SDennis Zhou } 1253047924c9SDennis Zhou 1254ca460b3cSDennis Zhou (Facebook) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1255ca460b3cSDennis Zhou (Facebook) { 1256ca460b3cSDennis Zhou (Facebook) struct pcpu_block_md *md_block; 1257ca460b3cSDennis Zhou (Facebook) 125892c14cabSDennis Zhou /* init the chunk's block */ 125992c14cabSDennis Zhou pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 126092c14cabSDennis Zhou 1261ca460b3cSDennis Zhou (Facebook) for (md_block = chunk->md_blocks; 1262ca460b3cSDennis Zhou (Facebook) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1263047924c9SDennis Zhou md_block++) 1264047924c9SDennis Zhou pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1265ca460b3cSDennis Zhou (Facebook) } 1266ca460b3cSDennis Zhou (Facebook) 126740064aecSDennis Zhou (Facebook) /** 126840064aecSDennis Zhou (Facebook) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 126940064aecSDennis Zhou (Facebook) * @tmp_addr: the start of the region served 127040064aecSDennis Zhou (Facebook) * @map_size: size of the region served 127140064aecSDennis Zhou (Facebook) * 127240064aecSDennis Zhou (Facebook) * This is responsible for creating the chunks that serve the first chunk. The 127340064aecSDennis Zhou (Facebook) * base_addr is page aligned down of @tmp_addr while the region end is page 127440064aecSDennis Zhou (Facebook) * aligned up. Offsets are kept track of to determine the region served. 
All 127540064aecSDennis Zhou (Facebook) * this is done to appease the bitmap allocator in avoiding partial blocks. 127640064aecSDennis Zhou (Facebook) * 127740064aecSDennis Zhou (Facebook) * RETURNS: 127840064aecSDennis Zhou (Facebook) * Chunk serving the region at @tmp_addr of @map_size. 127940064aecSDennis Zhou (Facebook) */ 1280c0ebfdc3SDennis Zhou (Facebook) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 128140064aecSDennis Zhou (Facebook) int map_size) 128210edf5b0SDennis Zhou (Facebook) { 128310edf5b0SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 1284ca460b3cSDennis Zhou (Facebook) unsigned long aligned_addr, lcm_align; 128540064aecSDennis Zhou (Facebook) int start_offset, offset_bits, region_size, region_bits; 1286f655f405SMike Rapoport size_t alloc_size; 1287c0ebfdc3SDennis Zhou (Facebook) 1288c0ebfdc3SDennis Zhou (Facebook) /* region calculations */ 1289c0ebfdc3SDennis Zhou (Facebook) aligned_addr = tmp_addr & PAGE_MASK; 1290c0ebfdc3SDennis Zhou (Facebook) 1291c0ebfdc3SDennis Zhou (Facebook) start_offset = tmp_addr - aligned_addr; 12926b9d7c8eSDennis Zhou (Facebook) 1293ca460b3cSDennis Zhou (Facebook) /* 1294ca460b3cSDennis Zhou (Facebook) * Align the end of the region with the LCM of PAGE_SIZE and 1295ca460b3cSDennis Zhou (Facebook) * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of 1296ca460b3cSDennis Zhou (Facebook) * the other. 1297ca460b3cSDennis Zhou (Facebook) */ 1298ca460b3cSDennis Zhou (Facebook) lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); 1299ca460b3cSDennis Zhou (Facebook) region_size = ALIGN(start_offset + map_size, lcm_align); 130010edf5b0SDennis Zhou (Facebook) 1301c0ebfdc3SDennis Zhou (Facebook) /* allocate chunk */ 1302f655f405SMike Rapoport alloc_size = sizeof(struct pcpu_chunk) + 1303f655f405SMike Rapoport BITS_TO_LONGS(region_size >> PAGE_SHIFT); 1304f655f405SMike Rapoport chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1305f655f405SMike Rapoport if (!chunk) 1306f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1307f655f405SMike Rapoport alloc_size); 1308c0ebfdc3SDennis Zhou (Facebook) 130910edf5b0SDennis Zhou (Facebook) INIT_LIST_HEAD(&chunk->list); 1310c0ebfdc3SDennis Zhou (Facebook) 1311c0ebfdc3SDennis Zhou (Facebook) chunk->base_addr = (void *)aligned_addr; 131210edf5b0SDennis Zhou (Facebook) chunk->start_offset = start_offset; 13136b9d7c8eSDennis Zhou (Facebook) chunk->end_offset = region_size - chunk->start_offset - map_size; 1314c0ebfdc3SDennis Zhou (Facebook) 13158ab16c43SDennis Zhou (Facebook) chunk->nr_pages = region_size >> PAGE_SHIFT; 131640064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 1317c0ebfdc3SDennis Zhou (Facebook) 1318f655f405SMike Rapoport alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); 1319f655f405SMike Rapoport chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1320f655f405SMike Rapoport if (!chunk->alloc_map) 1321f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1322f655f405SMike Rapoport alloc_size); 1323f655f405SMike Rapoport 1324f655f405SMike Rapoport alloc_size = 1325f655f405SMike Rapoport BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); 1326f655f405SMike Rapoport chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1327f655f405SMike Rapoport if (!chunk->bound_map) 1328f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1329f655f405SMike Rapoport alloc_size); 1330f655f405SMike Rapoport 1331f655f405SMike 
Rapoport alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); 1332f655f405SMike Rapoport chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1333f655f405SMike Rapoport if (!chunk->md_blocks) 1334f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 1335f655f405SMike Rapoport alloc_size); 1336f655f405SMike Rapoport 1337ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 133810edf5b0SDennis Zhou (Facebook) 133910edf5b0SDennis Zhou (Facebook) /* manage populated page bitmap */ 134010edf5b0SDennis Zhou (Facebook) chunk->immutable = true; 13418ab16c43SDennis Zhou (Facebook) bitmap_fill(chunk->populated, chunk->nr_pages); 13428ab16c43SDennis Zhou (Facebook) chunk->nr_populated = chunk->nr_pages; 1343b239f7daSDennis Zhou chunk->nr_empty_pop_pages = chunk->nr_pages; 134410edf5b0SDennis Zhou (Facebook) 134540064aecSDennis Zhou (Facebook) chunk->free_bytes = map_size; 1346c0ebfdc3SDennis Zhou (Facebook) 1347c0ebfdc3SDennis Zhou (Facebook) if (chunk->start_offset) { 1348c0ebfdc3SDennis Zhou (Facebook) /* hide the beginning of the bitmap */ 134940064aecSDennis Zhou (Facebook) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 135040064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 0, offset_bits); 135140064aecSDennis Zhou (Facebook) set_bit(0, chunk->bound_map); 135240064aecSDennis Zhou (Facebook) set_bit(offset_bits, chunk->bound_map); 1353ca460b3cSDennis Zhou (Facebook) 135492c14cabSDennis Zhou chunk->chunk_md.first_free = offset_bits; 135586b442fbSDennis Zhou (Facebook) 1356ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1357c0ebfdc3SDennis Zhou (Facebook) } 1358c0ebfdc3SDennis Zhou (Facebook) 13596b9d7c8eSDennis Zhou (Facebook) if (chunk->end_offset) { 13606b9d7c8eSDennis Zhou (Facebook) /* hide the end of the bitmap */ 136140064aecSDennis Zhou (Facebook) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 136240064aecSDennis Zhou (Facebook) bitmap_set(chunk->alloc_map, 136340064aecSDennis Zhou (Facebook) pcpu_chunk_map_bits(chunk) - offset_bits, 136440064aecSDennis Zhou (Facebook) offset_bits); 136540064aecSDennis Zhou (Facebook) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 136640064aecSDennis Zhou (Facebook) chunk->bound_map); 136740064aecSDennis Zhou (Facebook) set_bit(region_bits, chunk->bound_map); 13686b9d7c8eSDennis Zhou (Facebook) 1369ca460b3cSDennis Zhou (Facebook) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1370ca460b3cSDennis Zhou (Facebook) - offset_bits, offset_bits); 1371ca460b3cSDennis Zhou (Facebook) } 137240064aecSDennis Zhou (Facebook) 137310edf5b0SDennis Zhou (Facebook) return chunk; 137410edf5b0SDennis Zhou (Facebook) } 137510edf5b0SDennis Zhou (Facebook) 137647504ee0SDennis Zhou static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) 13776081089fSTejun Heo { 13786081089fSTejun Heo struct pcpu_chunk *chunk; 137940064aecSDennis Zhou (Facebook) int region_bits; 13806081089fSTejun Heo 138147504ee0SDennis Zhou chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 13826081089fSTejun Heo if (!chunk) 13836081089fSTejun Heo return NULL; 13846081089fSTejun Heo 13856081089fSTejun Heo INIT_LIST_HEAD(&chunk->list); 1386c0ebfdc3SDennis Zhou (Facebook) chunk->nr_pages = pcpu_unit_pages; 138740064aecSDennis Zhou (Facebook) region_bits = pcpu_chunk_map_bits(chunk); 138840064aecSDennis Zhou (Facebook) 138940064aecSDennis Zhou (Facebook) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 139047504ee0SDennis Zhou 
sizeof(chunk->alloc_map[0]), gfp); 139140064aecSDennis Zhou (Facebook) if (!chunk->alloc_map) 139240064aecSDennis Zhou (Facebook) goto alloc_map_fail; 139340064aecSDennis Zhou (Facebook) 139440064aecSDennis Zhou (Facebook) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 139547504ee0SDennis Zhou sizeof(chunk->bound_map[0]), gfp); 139640064aecSDennis Zhou (Facebook) if (!chunk->bound_map) 139740064aecSDennis Zhou (Facebook) goto bound_map_fail; 139840064aecSDennis Zhou (Facebook) 1399ca460b3cSDennis Zhou (Facebook) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 140047504ee0SDennis Zhou sizeof(chunk->md_blocks[0]), gfp); 1401ca460b3cSDennis Zhou (Facebook) if (!chunk->md_blocks) 1402ca460b3cSDennis Zhou (Facebook) goto md_blocks_fail; 1403ca460b3cSDennis Zhou (Facebook) 1404ca460b3cSDennis Zhou (Facebook) pcpu_init_md_blocks(chunk); 1405ca460b3cSDennis Zhou (Facebook) 140640064aecSDennis Zhou (Facebook) /* init metadata */ 140740064aecSDennis Zhou (Facebook) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1408c0ebfdc3SDennis Zhou (Facebook) 14096081089fSTejun Heo return chunk; 141040064aecSDennis Zhou (Facebook) 1411ca460b3cSDennis Zhou (Facebook) md_blocks_fail: 1412ca460b3cSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 141340064aecSDennis Zhou (Facebook) bound_map_fail: 141440064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 141540064aecSDennis Zhou (Facebook) alloc_map_fail: 141640064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk); 141740064aecSDennis Zhou (Facebook) 141840064aecSDennis Zhou (Facebook) return NULL; 14196081089fSTejun Heo } 14206081089fSTejun Heo 14216081089fSTejun Heo static void pcpu_free_chunk(struct pcpu_chunk *chunk) 14226081089fSTejun Heo { 14236081089fSTejun Heo if (!chunk) 14246081089fSTejun Heo return; 14256685b357SMike Rapoport pcpu_mem_free(chunk->md_blocks); 142640064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->bound_map); 142740064aecSDennis Zhou (Facebook) pcpu_mem_free(chunk->alloc_map); 14281d5cfdb0STetsuo Handa pcpu_mem_free(chunk); 14296081089fSTejun Heo } 14306081089fSTejun Heo 1431b539b87fSTejun Heo /** 1432b539b87fSTejun Heo * pcpu_chunk_populated - post-population bookkeeping 1433b539b87fSTejun Heo * @chunk: pcpu_chunk which got populated 1434b539b87fSTejun Heo * @page_start: the start page 1435b539b87fSTejun Heo * @page_end: the end page 1436b539b87fSTejun Heo * 1437b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been populated to @chunk. Update 1438b539b87fSTejun Heo * the bookkeeping information accordingly. Must be called after each 1439b539b87fSTejun Heo * successful population. 144040064aecSDennis Zhou (Facebook) * * Note that the empty populated page accounting is also updated here via pcpu_update_empty_pages().
1443b539b87fSTejun Heo */ 144440064aecSDennis Zhou (Facebook) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, 1445b239f7daSDennis Zhou int page_end) 1446b539b87fSTejun Heo { 1447b539b87fSTejun Heo int nr = page_end - page_start; 1448b539b87fSTejun Heo 1449b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1450b539b87fSTejun Heo 1451b539b87fSTejun Heo bitmap_set(chunk->populated, page_start, nr); 1452b539b87fSTejun Heo chunk->nr_populated += nr; 14537e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += nr; 145440064aecSDennis Zhou (Facebook) 1455b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, nr); 145640064aecSDennis Zhou (Facebook) } 1457b539b87fSTejun Heo 1458b539b87fSTejun Heo /** 1459b539b87fSTejun Heo * pcpu_chunk_depopulated - post-depopulation bookkeeping 1460b539b87fSTejun Heo * @chunk: pcpu_chunk which got depopulated 1461b539b87fSTejun Heo * @page_start: the start page 1462b539b87fSTejun Heo * @page_end: the end page 1463b539b87fSTejun Heo * 1464b539b87fSTejun Heo * Pages in [@page_start,@page_end) have been depopulated from @chunk. 1465b539b87fSTejun Heo * Update the bookkeeping information accordingly. Must be called after 1466b539b87fSTejun Heo * each successful depopulation. 1467b539b87fSTejun Heo */ 1468b539b87fSTejun Heo static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 1469b539b87fSTejun Heo int page_start, int page_end) 1470b539b87fSTejun Heo { 1471b539b87fSTejun Heo int nr = page_end - page_start; 1472b539b87fSTejun Heo 1473b539b87fSTejun Heo lockdep_assert_held(&pcpu_lock); 1474b539b87fSTejun Heo 1475b539b87fSTejun Heo bitmap_clear(chunk->populated, page_start, nr); 1476b539b87fSTejun Heo chunk->nr_populated -= nr; 14777e8a6304SDennis Zhou (Facebook) pcpu_nr_populated -= nr; 1478b239f7daSDennis Zhou 1479b239f7daSDennis Zhou pcpu_update_empty_pages(chunk, -nr); 1480b539b87fSTejun Heo } 1481b539b87fSTejun Heo 1482fbf59bc9STejun Heo /* 14839f645532STejun Heo * Chunk management implementation. 1484fbf59bc9STejun Heo * 14859f645532STejun Heo * To allow different implementations, chunk alloc/free and 14869f645532STejun Heo * [de]population are implemented in a separate file which is pulled 14879f645532STejun Heo * into this file and compiled together. The following functions 14889f645532STejun Heo * should be implemented. 
1489ccea34b5STejun Heo * 14909f645532STejun Heo * pcpu_populate_chunk - populate the specified range of a chunk 14919f645532STejun Heo * pcpu_depopulate_chunk - depopulate the specified range of a chunk 14929f645532STejun Heo * pcpu_create_chunk - create a new chunk 14939f645532STejun Heo * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 14949f645532STejun Heo * pcpu_addr_to_page - translate address to physical address 14959f645532STejun Heo * pcpu_verify_alloc_info - check alloc_info is acceptable during init 1496fbf59bc9STejun Heo */ 149715d9f3d1SDennis Zhou static int pcpu_populate_chunk(struct pcpu_chunk *chunk, 149847504ee0SDennis Zhou int page_start, int page_end, gfp_t gfp); 149915d9f3d1SDennis Zhou static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, 150015d9f3d1SDennis Zhou int page_start, int page_end); 150147504ee0SDennis Zhou static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp); 15029f645532STejun Heo static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 15039f645532STejun Heo static struct page *pcpu_addr_to_page(void *addr); 15049f645532STejun Heo static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 1505fbf59bc9STejun Heo 1506b0c9778bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_KM 1507b0c9778bSTejun Heo #include "percpu-km.c" 1508b0c9778bSTejun Heo #else 15099f645532STejun Heo #include "percpu-vm.c" 1510b0c9778bSTejun Heo #endif 1511fbf59bc9STejun Heo 1512fbf59bc9STejun Heo /** 151388999a89STejun Heo * pcpu_chunk_addr_search - determine chunk containing specified address 151488999a89STejun Heo * @addr: address for which the chunk needs to be determined. 151588999a89STejun Heo * 1516c0ebfdc3SDennis Zhou (Facebook) * This is an internal function that handles all but static allocations. 1517c0ebfdc3SDennis Zhou (Facebook) * Static percpu address values should never be passed into the allocator. 1518c0ebfdc3SDennis Zhou (Facebook) * 151988999a89STejun Heo * RETURNS: 152088999a89STejun Heo * The address of the found chunk. 152188999a89STejun Heo */ 152288999a89STejun Heo static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 152388999a89STejun Heo { 1524c0ebfdc3SDennis Zhou (Facebook) /* is it in the dynamic region (first chunk)? */ 1525560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) 1526c0ebfdc3SDennis Zhou (Facebook) return pcpu_first_chunk; 1527c0ebfdc3SDennis Zhou (Facebook) 1528c0ebfdc3SDennis Zhou (Facebook) /* is it in the reserved region? */ 1529560f2c23SDennis Zhou (Facebook) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) 153088999a89STejun Heo return pcpu_reserved_chunk; 153188999a89STejun Heo 153288999a89STejun Heo /* 153388999a89STejun Heo * The address is relative to unit0 which might be unused and 153488999a89STejun Heo * thus unmapped. Offset the address to the unit space of the 153588999a89STejun Heo * current processor before looking it up in the vmalloc 153688999a89STejun Heo * space. Note that any possible cpu id can be used here, so 153788999a89STejun Heo * there's no need to worry about preemption or cpu hotplug. 
153888999a89STejun Heo */ 153988999a89STejun Heo addr += pcpu_unit_offsets[raw_smp_processor_id()]; 15409f645532STejun Heo return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 154188999a89STejun Heo } 154288999a89STejun Heo 154388999a89STejun Heo /** 1544edcb4639STejun Heo * pcpu_alloc - the percpu allocator 1545cae3aeb8STejun Heo * @size: size of area to allocate in bytes 1546fbf59bc9STejun Heo * @align: alignment of area (max PAGE_SIZE) 1547edcb4639STejun Heo * @reserved: allocate from the reserved chunk if available 15485835d96eSTejun Heo * @gfp: allocation flags 1549fbf59bc9STejun Heo * 15505835d96eSTejun Heo * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 15510ea7eeecSDaniel Borkmann * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 15520ea7eeecSDaniel Borkmann * then no warning will be triggered on invalid or failed allocation 15530ea7eeecSDaniel Borkmann * requests. 1554fbf59bc9STejun Heo * 1555fbf59bc9STejun Heo * RETURNS: 1556fbf59bc9STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1557fbf59bc9STejun Heo */ 15585835d96eSTejun Heo static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 15595835d96eSTejun Heo gfp_t gfp) 1560fbf59bc9STejun Heo { 156128307d93SFilipe Manana gfp_t pcpu_gfp; 156228307d93SFilipe Manana bool is_atomic; 156328307d93SFilipe Manana bool do_warn; 1564f2badb0cSTejun Heo static int warn_limit = 10; 15658744d859SDennis Zhou struct pcpu_chunk *chunk, *next; 1566f2badb0cSTejun Heo const char *err; 156740064aecSDennis Zhou (Facebook) int slot, off, cpu, ret; 1568403a91b1SJiri Kosina unsigned long flags; 1569f528f0b8SCatalin Marinas void __percpu *ptr; 157040064aecSDennis Zhou (Facebook) size_t bits, bit_align; 1571fbf59bc9STejun Heo 157228307d93SFilipe Manana gfp = current_gfp_context(gfp); 157328307d93SFilipe Manana /* whitelisted flags that can be passed to the backing allocators */ 157428307d93SFilipe Manana pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 157528307d93SFilipe Manana is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 157628307d93SFilipe Manana do_warn = !(gfp & __GFP_NOWARN); 157728307d93SFilipe Manana 1578723ad1d9SAl Viro /* 157940064aecSDennis Zhou (Facebook) * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 158040064aecSDennis Zhou (Facebook) * therefore alignment must be a minimum of that many bytes. 158140064aecSDennis Zhou (Facebook) * An allocation may have internal fragmentation from rounding up 158240064aecSDennis Zhou (Facebook) * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 
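* For example (an illustrative case, not part of the original comment, assuming a PCPU_MIN_ALLOC_SIZE of 4 bytes): a request for 10 bytes with 2-byte alignment is rounded up to a 12-byte, 4-byte-aligned request, i.e. bits = 3 and bit_align = 1.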
1583723ad1d9SAl Viro */ 1584d2f3c384SDennis Zhou (Facebook) if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1585d2f3c384SDennis Zhou (Facebook) align = PCPU_MIN_ALLOC_SIZE; 1586723ad1d9SAl Viro 1587d2f3c384SDennis Zhou (Facebook) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 158840064aecSDennis Zhou (Facebook) bits = size >> PCPU_MIN_ALLOC_SHIFT; 158940064aecSDennis Zhou (Facebook) bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 15902f69fa82SViro 15913ca45a46Szijun_hu if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 15923ca45a46Szijun_hu !is_power_of_2(align))) { 15930ea7eeecSDaniel Borkmann WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1594756a025fSJoe Perches size, align); 1595fbf59bc9STejun Heo return NULL; 1596fbf59bc9STejun Heo } 1597fbf59bc9STejun Heo 1598f52ba1feSKirill Tkhai if (!is_atomic) { 1599f52ba1feSKirill Tkhai /* 1600f52ba1feSKirill Tkhai * pcpu_balance_workfn() allocates memory under this mutex, 1601f52ba1feSKirill Tkhai * and it may wait for memory reclaim. Allow current task 1602f52ba1feSKirill Tkhai * to become OOM victim, in case of memory pressure. 1603f52ba1feSKirill Tkhai */ 1604f52ba1feSKirill Tkhai if (gfp & __GFP_NOFAIL) 16056710e594STejun Heo mutex_lock(&pcpu_alloc_mutex); 1606f52ba1feSKirill Tkhai else if (mutex_lock_killable(&pcpu_alloc_mutex)) 1607f52ba1feSKirill Tkhai return NULL; 1608f52ba1feSKirill Tkhai } 16096710e594STejun Heo 1610403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1611fbf59bc9STejun Heo 1612edcb4639STejun Heo /* serve reserved allocations from the reserved chunk if available */ 1613edcb4639STejun Heo if (reserved && pcpu_reserved_chunk) { 1614edcb4639STejun Heo chunk = pcpu_reserved_chunk; 1615833af842STejun Heo 161640064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 161740064aecSDennis Zhou (Facebook) if (off < 0) { 1618833af842STejun Heo err = "alloc from reserved chunk failed"; 1619ccea34b5STejun Heo goto fail_unlock; 1620f2badb0cSTejun Heo } 1621833af842STejun Heo 162240064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1623edcb4639STejun Heo if (off >= 0) 1624edcb4639STejun Heo goto area_found; 1625833af842STejun Heo 1626f2badb0cSTejun Heo err = "alloc from reserved chunk failed"; 1627ccea34b5STejun Heo goto fail_unlock; 1628edcb4639STejun Heo } 1629edcb4639STejun Heo 1630ccea34b5STejun Heo restart: 1631edcb4639STejun Heo /* search through normal chunks */ 1632fbf59bc9STejun Heo for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 16338744d859SDennis Zhou list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) { 163440064aecSDennis Zhou (Facebook) off = pcpu_find_block_fit(chunk, bits, bit_align, 163540064aecSDennis Zhou (Facebook) is_atomic); 16368744d859SDennis Zhou if (off < 0) { 16378744d859SDennis Zhou if (slot < PCPU_SLOT_FAIL_THRESHOLD) 16388744d859SDennis Zhou pcpu_chunk_move(chunk, 0); 1639fbf59bc9STejun Heo continue; 16408744d859SDennis Zhou } 1641ccea34b5STejun Heo 164240064aecSDennis Zhou (Facebook) off = pcpu_alloc_area(chunk, bits, bit_align, off); 1643fbf59bc9STejun Heo if (off >= 0) 1644fbf59bc9STejun Heo goto area_found; 164540064aecSDennis Zhou (Facebook) 1646fbf59bc9STejun Heo } 1647fbf59bc9STejun Heo } 1648fbf59bc9STejun Heo 1649403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1650ccea34b5STejun Heo 1651b38d08f3STejun Heo /* 1652b38d08f3STejun Heo * No space left. Create a new chunk. 
We don't want multiple 1653b38d08f3STejun Heo * tasks to create chunks simultaneously. Serialize and create iff 1654b38d08f3STejun Heo * there's still no empty chunk after grabbing the mutex. 1655b38d08f3STejun Heo */ 165611df02bfSDennis Zhou if (is_atomic) { 165711df02bfSDennis Zhou err = "atomic alloc failed, no space left"; 16585835d96eSTejun Heo goto fail; 165911df02bfSDennis Zhou } 16605835d96eSTejun Heo 1661b38d08f3STejun Heo if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 1662554fef1cSDennis Zhou chunk = pcpu_create_chunk(pcpu_gfp); 1663f2badb0cSTejun Heo if (!chunk) { 1664f2badb0cSTejun Heo err = "failed to allocate new chunk"; 1665b38d08f3STejun Heo goto fail; 1666f2badb0cSTejun Heo } 1667ccea34b5STejun Heo 1668403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1669fbf59bc9STejun Heo pcpu_chunk_relocate(chunk, -1); 1670b38d08f3STejun Heo } else { 1671b38d08f3STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1672b38d08f3STejun Heo } 1673b38d08f3STejun Heo 1674ccea34b5STejun Heo goto restart; 1675fbf59bc9STejun Heo 1676fbf59bc9STejun Heo area_found: 167730a5b536SDennis Zhou pcpu_stats_area_alloc(chunk, size); 1678403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1679ccea34b5STejun Heo 1680dca49645STejun Heo /* populate if not all pages are already there */ 16815835d96eSTejun Heo if (!is_atomic) { 1682e837dfdeSDennis Zhou unsigned int page_start, page_end, rs, re; 1683e04d3208STejun Heo 1684dca49645STejun Heo page_start = PFN_DOWN(off); 1685dca49645STejun Heo page_end = PFN_UP(off + size); 1686dca49645STejun Heo 1687e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 168891e914c5SDennis Zhou (Facebook) page_start, page_end) { 1689dca49645STejun Heo WARN_ON(chunk->immutable); 1690dca49645STejun Heo 1691554fef1cSDennis Zhou ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1692b38d08f3STejun Heo 1693403a91b1SJiri Kosina spin_lock_irqsave(&pcpu_lock, flags); 1694b38d08f3STejun Heo if (ret) { 169540064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1696f2badb0cSTejun Heo err = "failed to populate"; 1697ccea34b5STejun Heo goto fail_unlock; 1698fbf59bc9STejun Heo } 1699b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, re); 1700b38d08f3STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1701dca49645STejun Heo } 1702dca49645STejun Heo 1703ccea34b5STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1704e04d3208STejun Heo } 1705ccea34b5STejun Heo 17061a4d7607STejun Heo if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 17071a4d7607STejun Heo pcpu_schedule_balance_work(); 17081a4d7607STejun Heo 1709dca49645STejun Heo /* clear the areas and return address relative to base address */ 1710dca49645STejun Heo for_each_possible_cpu(cpu) 1711dca49645STejun Heo memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1712dca49645STejun Heo 1713f528f0b8SCatalin Marinas ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 17148a8c35faSLarry Finger kmemleak_alloc_percpu(ptr, size, gfp); 1715df95e795SDennis Zhou 1716df95e795SDennis Zhou trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1717df95e795SDennis Zhou chunk->base_addr, off, ptr); 1718df95e795SDennis Zhou 1719f528f0b8SCatalin Marinas return ptr; 1720ccea34b5STejun Heo 1721ccea34b5STejun Heo fail_unlock: 1722403a91b1SJiri Kosina spin_unlock_irqrestore(&pcpu_lock, flags); 1723b38d08f3STejun Heo fail: 1724df95e795SDennis Zhou trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1725df95e795SDennis Zhou 17260ea7eeecSDaniel Borkmann if (!is_atomic && do_warn && 
warn_limit) { 1727870d4b12SJoe Perches pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 17285835d96eSTejun Heo size, align, is_atomic, err); 1729f2badb0cSTejun Heo dump_stack(); 1730f2badb0cSTejun Heo if (!--warn_limit) 1731870d4b12SJoe Perches pr_info("limit reached, disable warning\n"); 1732f2badb0cSTejun Heo } 17331a4d7607STejun Heo if (is_atomic) { 17341a4d7607STejun Heo /* see the flag handling in pcpu_balance_workfn() */ 17351a4d7607STejun Heo pcpu_atomic_alloc_failed = true; 17361a4d7607STejun Heo pcpu_schedule_balance_work(); 17376710e594STejun Heo } else { 17386710e594STejun Heo mutex_unlock(&pcpu_alloc_mutex); 17391a4d7607STejun Heo } 1740ccea34b5STejun Heo return NULL; 1741fbf59bc9STejun Heo } 1742edcb4639STejun Heo 1743edcb4639STejun Heo /** 17445835d96eSTejun Heo * __alloc_percpu_gfp - allocate dynamic percpu area 1745edcb4639STejun Heo * @size: size of area to allocate in bytes 1746edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 17475835d96eSTejun Heo * @gfp: allocation flags 1748edcb4639STejun Heo * 17495835d96eSTejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align. If 17505835d96eSTejun Heo * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 17510ea7eeecSDaniel Borkmann * be called from any context but is a lot more likely to fail. If @gfp 17520ea7eeecSDaniel Borkmann * has __GFP_NOWARN then no warning will be triggered on invalid or failed 17530ea7eeecSDaniel Borkmann * allocation requests. 1754ccea34b5STejun Heo * 1755edcb4639STejun Heo * RETURNS: 1756edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure. 1757edcb4639STejun Heo */ 17585835d96eSTejun Heo void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) 17595835d96eSTejun Heo { 17605835d96eSTejun Heo return pcpu_alloc(size, align, false, gfp); 17615835d96eSTejun Heo } 17625835d96eSTejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); 17635835d96eSTejun Heo 17645835d96eSTejun Heo /** 17655835d96eSTejun Heo * __alloc_percpu - allocate dynamic percpu area 17665835d96eSTejun Heo * @size: size of area to allocate in bytes 17675835d96eSTejun Heo * @align: alignment of area (max PAGE_SIZE) 17685835d96eSTejun Heo * 17695835d96eSTejun Heo * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). 17705835d96eSTejun Heo */ 177143cf38ebSTejun Heo void __percpu *__alloc_percpu(size_t size, size_t align) 1772edcb4639STejun Heo { 17735835d96eSTejun Heo return pcpu_alloc(size, align, false, GFP_KERNEL); 1774edcb4639STejun Heo } 1775fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(__alloc_percpu); 1776fbf59bc9STejun Heo 1777edcb4639STejun Heo /** 1778edcb4639STejun Heo * __alloc_reserved_percpu - allocate reserved percpu area 1779edcb4639STejun Heo * @size: size of area to allocate in bytes 1780edcb4639STejun Heo * @align: alignment of area (max PAGE_SIZE) 1781edcb4639STejun Heo * 17829329ba97STejun Heo * Allocate zero-filled percpu area of @size bytes aligned at @align 17839329ba97STejun Heo * from reserved percpu area if arch has set it up; otherwise, 17849329ba97STejun Heo * allocation is served from the same dynamic area. Might sleep. 17859329ba97STejun Heo * Might trigger writeouts. 1786edcb4639STejun Heo * 1787ccea34b5STejun Heo * CONTEXT: 1788ccea34b5STejun Heo * Does GFP_KERNEL allocation. 1789ccea34b5STejun Heo * 1790edcb4639STejun Heo * RETURNS: 1791edcb4639STejun Heo * Percpu pointer to the allocated area on success, NULL on failure.
1792edcb4639STejun Heo */ 179343cf38ebSTejun Heo void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1794edcb4639STejun Heo { 17955835d96eSTejun Heo return pcpu_alloc(size, align, true, GFP_KERNEL); 1796edcb4639STejun Heo } 1797edcb4639STejun Heo 1798a56dbddfSTejun Heo /** 17991a4d7607STejun Heo * pcpu_balance_workfn - manage the amount of free chunks and populated pages 1800a56dbddfSTejun Heo * @work: unused 1801a56dbddfSTejun Heo * 180247504ee0SDennis Zhou * Reclaim all fully free chunks except for the first one. This is also 180347504ee0SDennis Zhou * responsible for maintaining the pool of empty populated pages. However, 180447504ee0SDennis Zhou * it is possible that this is called when physical memory is scarce causing 180547504ee0SDennis Zhou * OOM killer to be triggered. We should avoid doing so until an actual 180647504ee0SDennis Zhou * allocation causes the failure as it is possible that requests can be 180747504ee0SDennis Zhou * serviced from already backed regions. 1808a56dbddfSTejun Heo */ 1809fe6bd8c3STejun Heo static void pcpu_balance_workfn(struct work_struct *work) 1810fbf59bc9STejun Heo { 181147504ee0SDennis Zhou /* gfp flags passed to underlying allocators */ 1812554fef1cSDennis Zhou const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 1813fe6bd8c3STejun Heo LIST_HEAD(to_free); 1814fe6bd8c3STejun Heo struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; 1815a56dbddfSTejun Heo struct pcpu_chunk *chunk, *next; 18161a4d7607STejun Heo int slot, nr_to_pop, ret; 1817a56dbddfSTejun Heo 18181a4d7607STejun Heo /* 18191a4d7607STejun Heo * There's no reason to keep around multiple unused chunks and VM 18201a4d7607STejun Heo * areas can be scarce. Destroy all free chunks except for one. 18211a4d7607STejun Heo */ 1822ccea34b5STejun Heo mutex_lock(&pcpu_alloc_mutex); 1823ccea34b5STejun Heo spin_lock_irq(&pcpu_lock); 1824a56dbddfSTejun Heo 1825fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, free_head, list) { 18268d408b4bSTejun Heo WARN_ON(chunk->immutable); 1827a56dbddfSTejun Heo 1828a56dbddfSTejun Heo /* spare the first one */ 1829fe6bd8c3STejun Heo if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1830a56dbddfSTejun Heo continue; 1831a56dbddfSTejun Heo 1832fe6bd8c3STejun Heo list_move(&chunk->list, &to_free); 1833a56dbddfSTejun Heo } 1834a56dbddfSTejun Heo 1835ccea34b5STejun Heo spin_unlock_irq(&pcpu_lock); 1836a56dbddfSTejun Heo 1837fe6bd8c3STejun Heo list_for_each_entry_safe(chunk, next, &to_free, list) { 1838e837dfdeSDennis Zhou unsigned int rs, re; 1839dca49645STejun Heo 1840e837dfdeSDennis Zhou bitmap_for_each_set_region(chunk->populated, rs, re, 0, 184191e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 1842a93ace48STejun Heo pcpu_depopulate_chunk(chunk, rs, re); 1843b539b87fSTejun Heo spin_lock_irq(&pcpu_lock); 1844b539b87fSTejun Heo pcpu_chunk_depopulated(chunk, rs, re); 1845b539b87fSTejun Heo spin_unlock_irq(&pcpu_lock); 1846a93ace48STejun Heo } 18476081089fSTejun Heo pcpu_destroy_chunk(chunk); 1848accd4f36SEric Dumazet cond_resched(); 1849fbf59bc9STejun Heo } 1850971f3918STejun Heo 18511a4d7607STejun Heo /* 18521a4d7607STejun Heo * Ensure there are certain number of free populated pages for 18531a4d7607STejun Heo * atomic allocs. Fill up from the most packed so that atomic 18541a4d7607STejun Heo * allocs don't increase fragmentation. If atomic allocation 18551a4d7607STejun Heo * failed previously, always populate the maximum amount. 
This 18561a4d7607STejun Heo * should prevent atomic allocs larger than PAGE_SIZE from keeping 18571a4d7607STejun Heo * failing indefinitely; however, large atomic allocs are not 18581a4d7607STejun Heo * something we support properly and can be highly unreliable and 18591a4d7607STejun Heo * inefficient. 18601a4d7607STejun Heo */ 18611a4d7607STejun Heo retry_pop: 18621a4d7607STejun Heo if (pcpu_atomic_alloc_failed) { 18631a4d7607STejun Heo nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 18641a4d7607STejun Heo /* best effort anyway, don't worry about synchronization */ 18651a4d7607STejun Heo pcpu_atomic_alloc_failed = false; 18661a4d7607STejun Heo } else { 18671a4d7607STejun Heo nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 18681a4d7607STejun Heo pcpu_nr_empty_pop_pages, 18691a4d7607STejun Heo 0, PCPU_EMPTY_POP_PAGES_HIGH); 18701a4d7607STejun Heo } 18711a4d7607STejun Heo 18721a4d7607STejun Heo for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 1873e837dfdeSDennis Zhou unsigned int nr_unpop = 0, rs, re; 18741a4d7607STejun Heo 18751a4d7607STejun Heo if (!nr_to_pop) 18761a4d7607STejun Heo break; 18771a4d7607STejun Heo 18781a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 18791a4d7607STejun Heo list_for_each_entry(chunk, &pcpu_slot[slot], list) { 18808ab16c43SDennis Zhou (Facebook) nr_unpop = chunk->nr_pages - chunk->nr_populated; 18811a4d7607STejun Heo if (nr_unpop) 18821a4d7607STejun Heo break; 18831a4d7607STejun Heo } 18841a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 18851a4d7607STejun Heo 18861a4d7607STejun Heo if (!nr_unpop) 18871a4d7607STejun Heo continue; 18881a4d7607STejun Heo 18891a4d7607STejun Heo /* @chunk can't go away while pcpu_alloc_mutex is held */ 1890e837dfdeSDennis Zhou bitmap_for_each_clear_region(chunk->populated, rs, re, 0, 189191e914c5SDennis Zhou (Facebook) chunk->nr_pages) { 1892e837dfdeSDennis Zhou int nr = min_t(int, re - rs, nr_to_pop); 18931a4d7607STejun Heo 189447504ee0SDennis Zhou ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 18951a4d7607STejun Heo if (!ret) { 18961a4d7607STejun Heo nr_to_pop -= nr; 18971a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 1898b239f7daSDennis Zhou pcpu_chunk_populated(chunk, rs, rs + nr); 18991a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 19001a4d7607STejun Heo } else { 19011a4d7607STejun Heo nr_to_pop = 0; 19021a4d7607STejun Heo } 19031a4d7607STejun Heo 19041a4d7607STejun Heo if (!nr_to_pop) 19051a4d7607STejun Heo break; 19061a4d7607STejun Heo } 19071a4d7607STejun Heo } 19081a4d7607STejun Heo 19091a4d7607STejun Heo if (nr_to_pop) { 19101a4d7607STejun Heo /* ran out of chunks to populate, create a new one and retry */ 191147504ee0SDennis Zhou chunk = pcpu_create_chunk(gfp); 19121a4d7607STejun Heo if (chunk) { 19131a4d7607STejun Heo spin_lock_irq(&pcpu_lock); 19141a4d7607STejun Heo pcpu_chunk_relocate(chunk, -1); 19151a4d7607STejun Heo spin_unlock_irq(&pcpu_lock); 19161a4d7607STejun Heo goto retry_pop; 19171a4d7607STejun Heo } 19181a4d7607STejun Heo } 19191a4d7607STejun Heo 1920971f3918STejun Heo mutex_unlock(&pcpu_alloc_mutex); 1921a56dbddfSTejun Heo } 1922fbf59bc9STejun Heo 1923fbf59bc9STejun Heo /** 1924fbf59bc9STejun Heo * free_percpu - free percpu area 1925fbf59bc9STejun Heo * @ptr: pointer to area to free 1926fbf59bc9STejun Heo * 1927ccea34b5STejun Heo * Free percpu area @ptr. 1928ccea34b5STejun Heo * 1929ccea34b5STejun Heo * CONTEXT: 1930ccea34b5STejun Heo * Can be called from atomic context. 
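*
* An illustrative usage sketch (not part of this file) of the dynamic
* percpu API documented earlier: allocate a per-cpu counter, bump this
* cpu's copy, and free it.
*
*	int __percpu *cnt = alloc_percpu(int);
*	if (cnt) {
*		this_cpu_inc(*cnt);
*		free_percpu(cnt);
*	}
*
* alloc_percpu() wraps __alloc_percpu() with the type's size and
* alignment, the returned area is zero-filled, and this_cpu_inc() is
* preemption safe, so no explicit get_cpu()/put_cpu() is needed here.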
1931fbf59bc9STejun Heo */ 193243cf38ebSTejun Heo void free_percpu(void __percpu *ptr) 1933fbf59bc9STejun Heo { 1934129182e5SAndrew Morton void *addr; 1935fbf59bc9STejun Heo struct pcpu_chunk *chunk; 1936ccea34b5STejun Heo unsigned long flags; 193740064aecSDennis Zhou (Facebook) int off; 1938198790d9SJohn Sperbeck bool need_balance = false; 1939fbf59bc9STejun Heo 1940fbf59bc9STejun Heo if (!ptr) 1941fbf59bc9STejun Heo return; 1942fbf59bc9STejun Heo 1943f528f0b8SCatalin Marinas kmemleak_free_percpu(ptr); 1944f528f0b8SCatalin Marinas 1945129182e5SAndrew Morton addr = __pcpu_ptr_to_addr(ptr); 1946129182e5SAndrew Morton 1947ccea34b5STejun Heo spin_lock_irqsave(&pcpu_lock, flags); 1948fbf59bc9STejun Heo 1949fbf59bc9STejun Heo chunk = pcpu_chunk_addr_search(addr); 1950bba174f5STejun Heo off = addr - chunk->base_addr; 1951fbf59bc9STejun Heo 195240064aecSDennis Zhou (Facebook) pcpu_free_area(chunk, off); 1953fbf59bc9STejun Heo 1954a56dbddfSTejun Heo /* if there are more than one fully free chunks, wake up grim reaper */ 195540064aecSDennis Zhou (Facebook) if (chunk->free_bytes == pcpu_unit_size) { 1956fbf59bc9STejun Heo struct pcpu_chunk *pos; 1957fbf59bc9STejun Heo 1958a56dbddfSTejun Heo list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1959fbf59bc9STejun Heo if (pos != chunk) { 1960198790d9SJohn Sperbeck need_balance = true; 1961fbf59bc9STejun Heo break; 1962fbf59bc9STejun Heo } 1963fbf59bc9STejun Heo } 1964fbf59bc9STejun Heo 1965df95e795SDennis Zhou trace_percpu_free_percpu(chunk->base_addr, off, ptr); 1966df95e795SDennis Zhou 1967ccea34b5STejun Heo spin_unlock_irqrestore(&pcpu_lock, flags); 1968198790d9SJohn Sperbeck 1969198790d9SJohn Sperbeck if (need_balance) 1970198790d9SJohn Sperbeck pcpu_schedule_balance_work(); 1971fbf59bc9STejun Heo } 1972fbf59bc9STejun Heo EXPORT_SYMBOL_GPL(free_percpu); 1973fbf59bc9STejun Heo 1974383776faSThomas Gleixner bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 1975383776faSThomas Gleixner { 1976383776faSThomas Gleixner #ifdef CONFIG_SMP 1977383776faSThomas Gleixner const size_t static_size = __per_cpu_end - __per_cpu_start; 1978383776faSThomas Gleixner void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 1979383776faSThomas Gleixner unsigned int cpu; 1980383776faSThomas Gleixner 1981383776faSThomas Gleixner for_each_possible_cpu(cpu) { 1982383776faSThomas Gleixner void *start = per_cpu_ptr(base, cpu); 1983383776faSThomas Gleixner void *va = (void *)addr; 1984383776faSThomas Gleixner 1985383776faSThomas Gleixner if (va >= start && va < start + static_size) { 19868ce371f9SPeter Zijlstra if (can_addr) { 1987383776faSThomas Gleixner *can_addr = (unsigned long) (va - start); 19888ce371f9SPeter Zijlstra *can_addr += (unsigned long) 19898ce371f9SPeter Zijlstra per_cpu_ptr(base, get_boot_cpu_id()); 19908ce371f9SPeter Zijlstra } 1991383776faSThomas Gleixner return true; 1992383776faSThomas Gleixner } 1993383776faSThomas Gleixner } 1994383776faSThomas Gleixner #endif 1995383776faSThomas Gleixner /* on UP, can't distinguish from other static vars, always false */ 1996383776faSThomas Gleixner return false; 1997383776faSThomas Gleixner } 1998383776faSThomas Gleixner 19993b034b0dSVivek Goyal /** 200010fad5e4STejun Heo * is_kernel_percpu_address - test whether address is from static percpu area 200110fad5e4STejun Heo * @addr: address to test 200210fad5e4STejun Heo * 200310fad5e4STejun Heo * Test whether @addr belongs to in-kernel static percpu area. Module 200410fad5e4STejun Heo * static percpu areas are not considered. 
For those, use 200510fad5e4STejun Heo * is_module_percpu_address(). 200610fad5e4STejun Heo * 200710fad5e4STejun Heo * RETURNS: 200810fad5e4STejun Heo * %true if @addr is from in-kernel static percpu area, %false otherwise. 200910fad5e4STejun Heo */ 201010fad5e4STejun Heo bool is_kernel_percpu_address(unsigned long addr) 201110fad5e4STejun Heo { 2012383776faSThomas Gleixner return __is_kernel_percpu_address(addr, NULL); 201310fad5e4STejun Heo } 201410fad5e4STejun Heo 201510fad5e4STejun Heo /** 20163b034b0dSVivek Goyal * per_cpu_ptr_to_phys - convert translated percpu address to physical address 20173b034b0dSVivek Goyal * @addr: the address to be converted to physical address 20183b034b0dSVivek Goyal * 20193b034b0dSVivek Goyal * Given @addr which is dereferenceable address obtained via one of 20203b034b0dSVivek Goyal * percpu access macros, this function translates it into its physical 20213b034b0dSVivek Goyal * address. The caller is responsible for ensuring @addr stays valid 20223b034b0dSVivek Goyal * until this function finishes. 20233b034b0dSVivek Goyal * 202467589c71SDave Young * percpu allocator has special setup for the first chunk, which currently 202567589c71SDave Young * supports either embedding in linear address space or vmalloc mapping, 202667589c71SDave Young * and, from the second one, the backing allocator (currently either vm or 202767589c71SDave Young * km) provides translation. 202867589c71SDave Young * 2029bffc4375SYannick Guerrini * The addr can be translated simply without checking if it falls into the 203067589c71SDave Young * first chunk. But the current code reflects better how percpu allocator 203167589c71SDave Young * actually works, and the verification can discover both bugs in percpu 203267589c71SDave Young * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current 203367589c71SDave Young * code. 203467589c71SDave Young * 20353b034b0dSVivek Goyal * RETURNS: 20363b034b0dSVivek Goyal * The physical address for @addr. 20373b034b0dSVivek Goyal */ 20383b034b0dSVivek Goyal phys_addr_t per_cpu_ptr_to_phys(void *addr) 20393b034b0dSVivek Goyal { 20409983b6f0STejun Heo void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 20419983b6f0STejun Heo bool in_first_chunk = false; 2042a855b84cSTejun Heo unsigned long first_low, first_high; 20439983b6f0STejun Heo unsigned int cpu; 20449983b6f0STejun Heo 20459983b6f0STejun Heo /* 2046a855b84cSTejun Heo * The following test on unit_low/high isn't strictly 20479983b6f0STejun Heo * necessary but will speed up lookups of addresses which 20489983b6f0STejun Heo * aren't in the first chunk. 2049c0ebfdc3SDennis Zhou (Facebook) * 2050c0ebfdc3SDennis Zhou (Facebook) * The address check is against full chunk sizes. pcpu_base_addr 2051c0ebfdc3SDennis Zhou (Facebook) * points to the beginning of the first chunk including the 2052c0ebfdc3SDennis Zhou (Facebook) * static region. Assumes good intent as the first chunk may 2053c0ebfdc3SDennis Zhou (Facebook) * not be full (ie. < pcpu_unit_pages in size). 
20549983b6f0STejun Heo */ 2055c0ebfdc3SDennis Zhou (Facebook) first_low = (unsigned long)pcpu_base_addr + 2056c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2057c0ebfdc3SDennis Zhou (Facebook) first_high = (unsigned long)pcpu_base_addr + 2058c0ebfdc3SDennis Zhou (Facebook) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2059a855b84cSTejun Heo if ((unsigned long)addr >= first_low && 2060a855b84cSTejun Heo (unsigned long)addr < first_high) { 20619983b6f0STejun Heo for_each_possible_cpu(cpu) { 20629983b6f0STejun Heo void *start = per_cpu_ptr(base, cpu); 20639983b6f0STejun Heo 20649983b6f0STejun Heo if (addr >= start && addr < start + pcpu_unit_size) { 20659983b6f0STejun Heo in_first_chunk = true; 20669983b6f0STejun Heo break; 20679983b6f0STejun Heo } 20689983b6f0STejun Heo } 20699983b6f0STejun Heo } 20709983b6f0STejun Heo 20719983b6f0STejun Heo if (in_first_chunk) { 2072eac522efSDavid Howells if (!is_vmalloc_addr(addr)) 20733b034b0dSVivek Goyal return __pa(addr); 20743b034b0dSVivek Goyal else 20759f57bd4dSEugene Surovegin return page_to_phys(vmalloc_to_page(addr)) + 20769f57bd4dSEugene Surovegin offset_in_page(addr); 2077020ec653STejun Heo } else 20789f57bd4dSEugene Surovegin return page_to_phys(pcpu_addr_to_page(addr)) + 20799f57bd4dSEugene Surovegin offset_in_page(addr); 20803b034b0dSVivek Goyal } 20813b034b0dSVivek Goyal 2082fbf59bc9STejun Heo /** 2083fd1e8a1fSTejun Heo * pcpu_alloc_alloc_info - allocate percpu allocation info 2084fd1e8a1fSTejun Heo * @nr_groups: the number of groups 2085fd1e8a1fSTejun Heo * @nr_units: the number of units 2086033e48fbSTejun Heo * 2087fd1e8a1fSTejun Heo * Allocate ai which is large enough for @nr_groups groups containing 2088fd1e8a1fSTejun Heo * @nr_units units. The returned ai's groups[0].cpu_map points to the 2089fd1e8a1fSTejun Heo * cpu_map array which is long enough for @nr_units and filled with 2090fd1e8a1fSTejun Heo * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2091fd1e8a1fSTejun Heo * pointer of other groups. 2092033e48fbSTejun Heo * 2093033e48fbSTejun Heo * RETURNS: 2094fd1e8a1fSTejun Heo * Pointer to the allocated pcpu_alloc_info on success, NULL on 2095fd1e8a1fSTejun Heo * failure. 2096033e48fbSTejun Heo */ 2097fd1e8a1fSTejun Heo struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2098fd1e8a1fSTejun Heo int nr_units) 2099fd1e8a1fSTejun Heo { 2100fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 2101fd1e8a1fSTejun Heo size_t base_size, ai_size; 2102fd1e8a1fSTejun Heo void *ptr; 2103fd1e8a1fSTejun Heo int unit; 2104fd1e8a1fSTejun Heo 210514d37612SGustavo A. R. 
Silva base_size = ALIGN(struct_size(ai, groups, nr_groups), 2106fd1e8a1fSTejun Heo __alignof__(ai->groups[0].cpu_map[0])); 2107fd1e8a1fSTejun Heo ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2108fd1e8a1fSTejun Heo 210926fb3daeSMike Rapoport ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2110fd1e8a1fSTejun Heo if (!ptr) 2111fd1e8a1fSTejun Heo return NULL; 2112fd1e8a1fSTejun Heo ai = ptr; 2113fd1e8a1fSTejun Heo ptr += base_size; 2114fd1e8a1fSTejun Heo 2115fd1e8a1fSTejun Heo ai->groups[0].cpu_map = ptr; 2116fd1e8a1fSTejun Heo 2117fd1e8a1fSTejun Heo for (unit = 0; unit < nr_units; unit++) 2118fd1e8a1fSTejun Heo ai->groups[0].cpu_map[unit] = NR_CPUS; 2119fd1e8a1fSTejun Heo 2120fd1e8a1fSTejun Heo ai->nr_groups = nr_groups; 2121fd1e8a1fSTejun Heo ai->__ai_size = PFN_ALIGN(ai_size); 2122fd1e8a1fSTejun Heo 2123fd1e8a1fSTejun Heo return ai; 2124fd1e8a1fSTejun Heo } 2125fd1e8a1fSTejun Heo 2126fd1e8a1fSTejun Heo /** 2127fd1e8a1fSTejun Heo * pcpu_free_alloc_info - free percpu allocation info 2128fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info to free 2129fd1e8a1fSTejun Heo * 2130fd1e8a1fSTejun Heo * Free @ai which was allocated by pcpu_alloc_alloc_info(). 2131fd1e8a1fSTejun Heo */ 2132fd1e8a1fSTejun Heo void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2133fd1e8a1fSTejun Heo { 2134999c17e3SSantosh Shilimkar memblock_free_early(__pa(ai), ai->__ai_size); 2135fd1e8a1fSTejun Heo } 2136fd1e8a1fSTejun Heo 2137fd1e8a1fSTejun Heo /** 2138fd1e8a1fSTejun Heo * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2139fd1e8a1fSTejun Heo * @lvl: loglevel 2140fd1e8a1fSTejun Heo * @ai: allocation info to dump 2141fd1e8a1fSTejun Heo * 2142fd1e8a1fSTejun Heo * Print out information about @ai using loglevel @lvl. 2143fd1e8a1fSTejun Heo */ 2144fd1e8a1fSTejun Heo static void pcpu_dump_alloc_info(const char *lvl, 2145fd1e8a1fSTejun Heo const struct pcpu_alloc_info *ai) 2146033e48fbSTejun Heo { 2147fd1e8a1fSTejun Heo int group_width = 1, cpu_width = 1, width; 2148033e48fbSTejun Heo char empty_str[] = "--------"; 2149fd1e8a1fSTejun Heo int alloc = 0, alloc_end = 0; 2150fd1e8a1fSTejun Heo int group, v; 2151fd1e8a1fSTejun Heo int upa, apl; /* units per alloc, allocs per line */ 2152033e48fbSTejun Heo 2153fd1e8a1fSTejun Heo v = ai->nr_groups; 2154033e48fbSTejun Heo while (v /= 10) 2155fd1e8a1fSTejun Heo group_width++; 2156033e48fbSTejun Heo 2157fd1e8a1fSTejun Heo v = num_possible_cpus(); 2158fd1e8a1fSTejun Heo while (v /= 10) 2159fd1e8a1fSTejun Heo cpu_width++; 2160fd1e8a1fSTejun Heo empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2161033e48fbSTejun Heo 2162fd1e8a1fSTejun Heo upa = ai->alloc_size / ai->unit_size; 2163fd1e8a1fSTejun Heo width = upa * (cpu_width + 1) + group_width + 3; 2164fd1e8a1fSTejun Heo apl = rounddown_pow_of_two(max(60 / width, 1)); 2165033e48fbSTejun Heo 2166fd1e8a1fSTejun Heo printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2167fd1e8a1fSTejun Heo lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2168fd1e8a1fSTejun Heo ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2169fd1e8a1fSTejun Heo 2170fd1e8a1fSTejun Heo for (group = 0; group < ai->nr_groups; group++) { 2171fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 2172fd1e8a1fSTejun Heo int unit = 0, unit_end = 0; 2173fd1e8a1fSTejun Heo 2174fd1e8a1fSTejun Heo BUG_ON(gi->nr_units % upa); 2175fd1e8a1fSTejun Heo for (alloc_end += gi->nr_units / upa; 2176fd1e8a1fSTejun Heo alloc < alloc_end; alloc++) { 2177fd1e8a1fSTejun Heo if (!(alloc % apl)) { 
21781170532bSJoe Perches pr_cont("\n"); 2179fd1e8a1fSTejun Heo printk("%spcpu-alloc: ", lvl); 2180033e48fbSTejun Heo } 21811170532bSJoe Perches pr_cont("[%0*d] ", group_width, group); 2182fd1e8a1fSTejun Heo 2183fd1e8a1fSTejun Heo for (unit_end += upa; unit < unit_end; unit++) 2184fd1e8a1fSTejun Heo if (gi->cpu_map[unit] != NR_CPUS) 21851170532bSJoe Perches pr_cont("%0*d ", 21861170532bSJoe Perches cpu_width, gi->cpu_map[unit]); 2187033e48fbSTejun Heo else 21881170532bSJoe Perches pr_cont("%s ", empty_str); 2189033e48fbSTejun Heo } 2190fd1e8a1fSTejun Heo } 21911170532bSJoe Perches pr_cont("\n"); 2192033e48fbSTejun Heo } 2193033e48fbSTejun Heo 2194fbf59bc9STejun Heo /** 21958d408b4bSTejun Heo * pcpu_setup_first_chunk - initialize the first percpu chunk 2196fd1e8a1fSTejun Heo * @ai: pcpu_alloc_info describing how the percpu area is shaped 219738a6be52STejun Heo * @base_addr: mapped address 2198fbf59bc9STejun Heo * 21998d408b4bSTejun Heo * Initialize the first percpu chunk which contains the kernel static 220069ab285bSChristophe JAILLET * percpu area. This function is to be called from the arch percpu area 220138a6be52STejun Heo * setup path. 22028d408b4bSTejun Heo * 2203fd1e8a1fSTejun Heo * @ai contains all information necessary to initialize the first 2204fd1e8a1fSTejun Heo * chunk and prime the dynamic percpu allocator. 22058d408b4bSTejun Heo * 2206fd1e8a1fSTejun Heo * @ai->static_size is the size of the static percpu area. 2207fd1e8a1fSTejun Heo * 2208fd1e8a1fSTejun Heo * @ai->reserved_size, if non-zero, specifies the number of bytes to 2209edcb4639STejun Heo * reserve after the static area in the first chunk. This reserves 2210edcb4639STejun Heo * the first chunk such that it's available only through reserved 2211edcb4639STejun Heo * percpu allocation. This is primarily used to serve module percpu 2212edcb4639STejun Heo * static areas on architectures where the addressing model has 2213edcb4639STejun Heo * limited offset range for symbol relocations to guarantee module 2214edcb4639STejun Heo * percpu symbols fall inside the relocatable range. 2215edcb4639STejun Heo * 2216fd1e8a1fSTejun Heo * @ai->dyn_size determines the number of bytes available for dynamic 2217fd1e8a1fSTejun Heo * allocation in the first chunk. The area between @ai->static_size + 2218fd1e8a1fSTejun Heo * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 22196074d5b0STejun Heo * 2220fd1e8a1fSTejun Heo * @ai->unit_size specifies the unit size and must be aligned to PAGE_SIZE 2221fd1e8a1fSTejun Heo * and equal to or larger than @ai->static_size + @ai->reserved_size + 2222fd1e8a1fSTejun Heo * @ai->dyn_size. 22238d408b4bSTejun Heo * 2224fd1e8a1fSTejun Heo * @ai->atom_size is the allocation atom size and is used as the alignment 2225fd1e8a1fSTejun Heo * for vm areas. 22268d408b4bSTejun Heo * 2227fd1e8a1fSTejun Heo * @ai->alloc_size is the allocation size and is always a multiple of 2228fd1e8a1fSTejun Heo * @ai->atom_size. This is larger than @ai->atom_size if 2229fd1e8a1fSTejun Heo * @ai->unit_size is larger than @ai->atom_size. 2230fd1e8a1fSTejun Heo * 2231fd1e8a1fSTejun Heo * @ai->nr_groups and @ai->groups describe the virtual memory layout of 2232fd1e8a1fSTejun Heo * percpu areas. Units which should be colocated are put into the 2233fd1e8a1fSTejun Heo * same group. Dynamic VM areas will be allocated according to these 2234fd1e8a1fSTejun Heo * groupings. If @ai->nr_groups is zero, a single group containing 2235fd1e8a1fSTejun Heo * all units is assumed. 
22368d408b4bSTejun Heo * 223738a6be52STejun Heo * The caller should have mapped the first chunk at @base_addr and 223838a6be52STejun Heo * copied static data to each unit. 2239fbf59bc9STejun Heo * 2240c0ebfdc3SDennis Zhou (Facebook) * The first chunk will always contain a static and a dynamic region. 2241c0ebfdc3SDennis Zhou (Facebook) * However, the static region is not managed by any chunk. If the first 2242c0ebfdc3SDennis Zhou (Facebook) * chunk also contains a reserved region, it is served by two chunks - 2243c0ebfdc3SDennis Zhou (Facebook) * one for the reserved region and one for the dynamic region. They 2244c0ebfdc3SDennis Zhou (Facebook) * share the same vm, but use offset regions in the area allocation map. 2245c0ebfdc3SDennis Zhou (Facebook) * The chunk serving the dynamic region is circulated in the chunk slots 2246c0ebfdc3SDennis Zhou (Facebook) * and available for dynamic allocation like any other chunk. 2247fbf59bc9STejun Heo */ 2248163fa234SKefeng Wang void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2249fd1e8a1fSTejun Heo void *base_addr) 2250fbf59bc9STejun Heo { 2251b9c39442SDennis Zhou (Facebook) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2252d2f3c384SDennis Zhou (Facebook) size_t static_size, dyn_size; 22530c4169c3SDennis Zhou (Facebook) struct pcpu_chunk *chunk; 22546563297cSTejun Heo unsigned long *group_offsets; 22556563297cSTejun Heo size_t *group_sizes; 2256fb435d52STejun Heo unsigned long *unit_off; 2257fbf59bc9STejun Heo unsigned int cpu; 2258fd1e8a1fSTejun Heo int *unit_map; 2259fd1e8a1fSTejun Heo int group, unit, i; 2260c0ebfdc3SDennis Zhou (Facebook) int map_size; 2261c0ebfdc3SDennis Zhou (Facebook) unsigned long tmp_addr; 2262f655f405SMike Rapoport size_t alloc_size; 2263fbf59bc9STejun Heo 2264635b75fcSTejun Heo #define PCPU_SETUP_BUG_ON(cond) do { \ 2265635b75fcSTejun Heo if (unlikely(cond)) { \ 2266870d4b12SJoe Perches pr_emerg("failed to initialize, %s\n", #cond); \ 2267870d4b12SJoe Perches pr_emerg("cpu_possible_mask=%*pb\n", \ 2268807de073STejun Heo cpumask_pr_args(cpu_possible_mask)); \ 2269635b75fcSTejun Heo pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2270635b75fcSTejun Heo BUG(); \ 2271635b75fcSTejun Heo } \ 2272635b75fcSTejun Heo } while (0) 2273635b75fcSTejun Heo 22742f39e637STejun Heo /* sanity checks */ 2275635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2276bbddff05STejun Heo #ifdef CONFIG_SMP 2277635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!ai->static_size); 2278f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2279bbddff05STejun Heo #endif 2280635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!base_addr); 2281f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2282635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2283f09f1243SAlexander Kuleshov PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2284635b75fcSTejun Heo PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2285ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2286099a19d9STejun Heo PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2287fb29a2ccSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!ai->dyn_size); 2288d2f3c384SDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2289ca460b3cSDennis Zhou (Facebook) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2290ca460b3cSDennis Zhou (Facebook) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 22919f645532STejun Heo 
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 22928d408b4bSTejun Heo 22936563297cSTejun Heo /* process group information and build config tables accordingly */ 2294f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2295f655f405SMike Rapoport group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2296f655f405SMike Rapoport if (!group_offsets) 2297f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2298f655f405SMike Rapoport alloc_size); 2299f655f405SMike Rapoport 2300f655f405SMike Rapoport alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2301f655f405SMike Rapoport group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2302f655f405SMike Rapoport if (!group_sizes) 2303f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2304f655f405SMike Rapoport alloc_size); 2305f655f405SMike Rapoport 2306f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2307f655f405SMike Rapoport unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2308f655f405SMike Rapoport if (!unit_map) 2309f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2310f655f405SMike Rapoport alloc_size); 2311f655f405SMike Rapoport 2312f655f405SMike Rapoport alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2313f655f405SMike Rapoport unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2314f655f405SMike Rapoport if (!unit_off) 2315f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2316f655f405SMike Rapoport alloc_size); 23172f39e637STejun Heo 2318fd1e8a1fSTejun Heo for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2319ffe0d5a5STejun Heo unit_map[cpu] = UINT_MAX; 2320a855b84cSTejun Heo 2321a855b84cSTejun Heo pcpu_low_unit_cpu = NR_CPUS; 2322a855b84cSTejun Heo pcpu_high_unit_cpu = NR_CPUS; 23232f39e637STejun Heo 2324fd1e8a1fSTejun Heo for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2325fd1e8a1fSTejun Heo const struct pcpu_group_info *gi = &ai->groups[group]; 23262f39e637STejun Heo 23276563297cSTejun Heo group_offsets[group] = gi->base_offset; 23286563297cSTejun Heo group_sizes[group] = gi->nr_units * ai->unit_size; 23296563297cSTejun Heo 2330fd1e8a1fSTejun Heo for (i = 0; i < gi->nr_units; i++) { 2331fd1e8a1fSTejun Heo cpu = gi->cpu_map[i]; 2332fd1e8a1fSTejun Heo if (cpu == NR_CPUS) 2333fd1e8a1fSTejun Heo continue; 2334fd1e8a1fSTejun Heo 23359f295664SDan Carpenter PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2336635b75fcSTejun Heo PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2337635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2338fd1e8a1fSTejun Heo 2339fd1e8a1fSTejun Heo unit_map[cpu] = unit + i; 2340fb435d52STejun Heo unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2341fb435d52STejun Heo 2342a855b84cSTejun Heo /* determine low/high unit_cpu */ 2343a855b84cSTejun Heo if (pcpu_low_unit_cpu == NR_CPUS || 2344a855b84cSTejun Heo unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2345a855b84cSTejun Heo pcpu_low_unit_cpu = cpu; 2346a855b84cSTejun Heo if (pcpu_high_unit_cpu == NR_CPUS || 2347a855b84cSTejun Heo unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2348a855b84cSTejun Heo pcpu_high_unit_cpu = cpu; 23490fc0531eSLinus Torvalds } 23500fc0531eSLinus Torvalds } 2351fd1e8a1fSTejun Heo pcpu_nr_units = unit; 23522f39e637STejun Heo 23532f39e637STejun Heo for_each_possible_cpu(cpu) 2354635b75fcSTejun Heo PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2355635b75fcSTejun Heo 2356635b75fcSTejun Heo /* we're done parsing the input, undefine BUG macro and dump config */ 
2357635b75fcSTejun Heo #undef PCPU_SETUP_BUG_ON 2358bcbea798STejun Heo pcpu_dump_alloc_info(KERN_DEBUG, ai); 23592f39e637STejun Heo 23606563297cSTejun Heo pcpu_nr_groups = ai->nr_groups; 23616563297cSTejun Heo pcpu_group_offsets = group_offsets; 23626563297cSTejun Heo pcpu_group_sizes = group_sizes; 2363fd1e8a1fSTejun Heo pcpu_unit_map = unit_map; 2364fb435d52STejun Heo pcpu_unit_offsets = unit_off; 23652f39e637STejun Heo 23662f39e637STejun Heo /* determine basic parameters */ 2367fd1e8a1fSTejun Heo pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2368d9b55eebSTejun Heo pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 23696563297cSTejun Heo pcpu_atom_size = ai->atom_size; 2370ce3141a2STejun Heo pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 2371ce3141a2STejun Heo BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 2372cafe8816STejun Heo 237330a5b536SDennis Zhou pcpu_stats_save_ai(ai); 237430a5b536SDennis Zhou 2375d9b55eebSTejun Heo /* 2376d9b55eebSTejun Heo * Allocate chunk slots. The additional last slot is for 2377d9b55eebSTejun Heo * empty chunks. 2378d9b55eebSTejun Heo */ 2379d9b55eebSTejun Heo pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 23807e1c4e27SMike Rapoport pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]), 23817e1c4e27SMike Rapoport SMP_CACHE_BYTES); 2382f655f405SMike Rapoport if (!pcpu_slot) 2383f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2384f655f405SMike Rapoport pcpu_nr_slots * sizeof(pcpu_slot[0])); 2385fbf59bc9STejun Heo for (i = 0; i < pcpu_nr_slots; i++) 2386fbf59bc9STejun Heo INIT_LIST_HEAD(&pcpu_slot[i]); 2387fbf59bc9STejun Heo 2388edcb4639STejun Heo /* 2389d2f3c384SDennis Zhou (Facebook) * The end of the static region needs to be aligned with the 2390d2f3c384SDennis Zhou (Facebook) * minimum allocation size as this offsets the reserved and 2391d2f3c384SDennis Zhou (Facebook) * dynamic region. The first chunk ends page aligned by 2392d2f3c384SDennis Zhou (Facebook) * expanding the dynamic region, therefore the dynamic region 2393d2f3c384SDennis Zhou (Facebook) * can be shrunk to compensate while still staying above the 2394d2f3c384SDennis Zhou (Facebook) * configured sizes. 2395d2f3c384SDennis Zhou (Facebook) */ 2396d2f3c384SDennis Zhou (Facebook) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2397d2f3c384SDennis Zhou (Facebook) dyn_size = ai->dyn_size - (static_size - ai->static_size); 2398d2f3c384SDennis Zhou (Facebook) 2399d2f3c384SDennis Zhou (Facebook) /* 2400c0ebfdc3SDennis Zhou (Facebook) * Initialize first chunk. 2401c0ebfdc3SDennis Zhou (Facebook) * If the reserved_size is non-zero, this initializes the reserved 2402c0ebfdc3SDennis Zhou (Facebook) * chunk. If the reserved_size is zero, the reserved chunk is NULL 2403c0ebfdc3SDennis Zhou (Facebook) * and the dynamic region is initialized here. The first chunk, 2404c0ebfdc3SDennis Zhou (Facebook) * pcpu_first_chunk, will always point to the chunk that serves 2405c0ebfdc3SDennis Zhou (Facebook) * the dynamic region. 
2406edcb4639STejun Heo */ 2407d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size; 2408d2f3c384SDennis Zhou (Facebook) map_size = ai->reserved_size ?: dyn_size; 240940064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 241061ace7faSTejun Heo 2411edcb4639STejun Heo /* init dynamic chunk if necessary */ 2412b9c39442SDennis Zhou (Facebook) if (ai->reserved_size) { 24130c4169c3SDennis Zhou (Facebook) pcpu_reserved_chunk = chunk; 2414b9c39442SDennis Zhou (Facebook) 2415d2f3c384SDennis Zhou (Facebook) tmp_addr = (unsigned long)base_addr + static_size + 2416c0ebfdc3SDennis Zhou (Facebook) ai->reserved_size; 2417d2f3c384SDennis Zhou (Facebook) map_size = dyn_size; 241840064aecSDennis Zhou (Facebook) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2419edcb4639STejun Heo } 2420edcb4639STejun Heo 24212441d15cSTejun Heo /* link the first chunk in */ 24220c4169c3SDennis Zhou (Facebook) pcpu_first_chunk = chunk; 24230cecf50cSDennis Zhou (Facebook) pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2424ae9e6bc9STejun Heo pcpu_chunk_relocate(pcpu_first_chunk, -1); 2425fbf59bc9STejun Heo 24267e8a6304SDennis Zhou (Facebook) /* include all regions of the first chunk */ 24277e8a6304SDennis Zhou (Facebook) pcpu_nr_populated += PFN_DOWN(size_sum); 24287e8a6304SDennis Zhou (Facebook) 242930a5b536SDennis Zhou pcpu_stats_chunk_alloc(); 2430df95e795SDennis Zhou trace_percpu_create_chunk(base_addr); 243130a5b536SDennis Zhou 2432fbf59bc9STejun Heo /* we're done */ 2433bba174f5STejun Heo pcpu_base_addr = base_addr; 2434fbf59bc9STejun Heo } 243566c3a757STejun Heo 2436bbddff05STejun Heo #ifdef CONFIG_SMP 2437bbddff05STejun Heo 243817f3609cSAndi Kleen const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2439f58dc01bSTejun Heo [PCPU_FC_AUTO] = "auto", 2440f58dc01bSTejun Heo [PCPU_FC_EMBED] = "embed", 2441f58dc01bSTejun Heo [PCPU_FC_PAGE] = "page", 2442f58dc01bSTejun Heo }; 244366c3a757STejun Heo 2444f58dc01bSTejun Heo enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2445f58dc01bSTejun Heo 2446f58dc01bSTejun Heo static int __init percpu_alloc_setup(char *str) 244766c3a757STejun Heo { 24485479c78aSCyrill Gorcunov if (!str) 24495479c78aSCyrill Gorcunov return -EINVAL; 24505479c78aSCyrill Gorcunov 2451f58dc01bSTejun Heo if (0) 2452f58dc01bSTejun Heo /* nada */; 2453f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2454f58dc01bSTejun Heo else if (!strcmp(str, "embed")) 2455f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_EMBED; 2456f58dc01bSTejun Heo #endif 2457f58dc01bSTejun Heo #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2458f58dc01bSTejun Heo else if (!strcmp(str, "page")) 2459f58dc01bSTejun Heo pcpu_chosen_fc = PCPU_FC_PAGE; 2460f58dc01bSTejun Heo #endif 2461f58dc01bSTejun Heo else 2462870d4b12SJoe Perches pr_warn("unknown allocator %s specified\n", str); 246366c3a757STejun Heo 2464f58dc01bSTejun Heo return 0; 246566c3a757STejun Heo } 2466f58dc01bSTejun Heo early_param("percpu_alloc", percpu_alloc_setup); 246766c3a757STejun Heo 24683c9a024fSTejun Heo /* 24693c9a024fSTejun Heo * pcpu_embed_first_chunk() is used by the generic percpu setup. 24703c9a024fSTejun Heo * Build it if needed by the arch config or the generic setup is going 24713c9a024fSTejun Heo * to be used. 
24723c9a024fSTejun Heo */ 247308fc4580STejun Heo #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 247408fc4580STejun Heo !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 24753c9a024fSTejun Heo #define BUILD_EMBED_FIRST_CHUNK 24763c9a024fSTejun Heo #endif 24773c9a024fSTejun Heo 24783c9a024fSTejun Heo /* build pcpu_page_first_chunk() iff needed by the arch config */ 24793c9a024fSTejun Heo #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 24803c9a024fSTejun Heo #define BUILD_PAGE_FIRST_CHUNK 24813c9a024fSTejun Heo #endif 24823c9a024fSTejun Heo 24833c9a024fSTejun Heo /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 24843c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 24853c9a024fSTejun Heo /** 2486fbf59bc9STejun Heo * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2487fbf59bc9STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2488fbf59bc9STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2489fbf59bc9STejun Heo * @atom_size: allocation atom size 2490fbf59bc9STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2491fbf59bc9STejun Heo * 2492fbf59bc9STejun Heo * This function determines grouping of units, their mappings to cpus 2493fbf59bc9STejun Heo * and other parameters considering needed percpu size, allocation 2494fbf59bc9STejun Heo * atom size and distances between CPUs. 2495fbf59bc9STejun Heo * 2496bffc4375SYannick Guerrini * Groups are always multiples of atom size and CPUs which are of 2497fbf59bc9STejun Heo * LOCAL_DISTANCE both ways are grouped together and share space for 2498fbf59bc9STejun Heo * units in the same group. The returned configuration is guaranteed 2499fbf59bc9STejun Heo * to have CPUs on different nodes on different groups and >=75% usage 2500fbf59bc9STejun Heo * of allocated virtual address space. 2501fbf59bc9STejun Heo * 2502fbf59bc9STejun Heo * RETURNS: 2503fbf59bc9STejun Heo * On success, pointer to the new allocation_info is returned. On 2504fbf59bc9STejun Heo * failure, ERR_PTR value is returned. 
2505fbf59bc9STejun Heo */ 2506fbf59bc9STejun Heo static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 2507fbf59bc9STejun Heo size_t reserved_size, size_t dyn_size, 2508fbf59bc9STejun Heo size_t atom_size, 2509fbf59bc9STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2510fbf59bc9STejun Heo { 2511fbf59bc9STejun Heo static int group_map[NR_CPUS] __initdata; 2512fbf59bc9STejun Heo static int group_cnt[NR_CPUS] __initdata; 2513fbf59bc9STejun Heo const size_t static_size = __per_cpu_end - __per_cpu_start; 2514fbf59bc9STejun Heo int nr_groups = 1, nr_units = 0; 2515fbf59bc9STejun Heo size_t size_sum, min_unit_size, alloc_size; 2516fbf59bc9STejun Heo int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 2517fbf59bc9STejun Heo int last_allocs, group, unit; 2518fbf59bc9STejun Heo unsigned int cpu, tcpu; 2519fbf59bc9STejun Heo struct pcpu_alloc_info *ai; 2520fbf59bc9STejun Heo unsigned int *cpu_map; 2521fbf59bc9STejun Heo 2522fbf59bc9STejun Heo /* this function may be called multiple times */ 2523fbf59bc9STejun Heo memset(group_map, 0, sizeof(group_map)); 2524fbf59bc9STejun Heo memset(group_cnt, 0, sizeof(group_cnt)); 2525fbf59bc9STejun Heo 2526fbf59bc9STejun Heo /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2527fbf59bc9STejun Heo size_sum = PFN_ALIGN(static_size + reserved_size + 2528fbf59bc9STejun Heo max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2529fbf59bc9STejun Heo dyn_size = size_sum - static_size - reserved_size; 2530fbf59bc9STejun Heo 2531fbf59bc9STejun Heo /* 2532fbf59bc9STejun Heo * Determine min_unit_size, alloc_size and max_upa such that 2533fbf59bc9STejun Heo * alloc_size is multiple of atom_size and is the smallest 253425985edcSLucas De Marchi * which can accommodate 4k aligned segments which are equal to 2535fbf59bc9STejun Heo * or larger than min_unit_size. 2536fbf59bc9STejun Heo */ 2537fbf59bc9STejun Heo min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2538fbf59bc9STejun Heo 25399c015162SDennis Zhou (Facebook) /* determine the maximum # of units that can fit in an allocation */ 2540fbf59bc9STejun Heo alloc_size = roundup(min_unit_size, atom_size); 2541fbf59bc9STejun Heo upa = alloc_size / min_unit_size; 2542f09f1243SAlexander Kuleshov while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2543fbf59bc9STejun Heo upa--; 2544fbf59bc9STejun Heo max_upa = upa; 2545fbf59bc9STejun Heo 2546fbf59bc9STejun Heo /* group cpus according to their proximity */ 2547fbf59bc9STejun Heo for_each_possible_cpu(cpu) { 2548fbf59bc9STejun Heo group = 0; 2549fbf59bc9STejun Heo next_group: 2550fbf59bc9STejun Heo for_each_possible_cpu(tcpu) { 2551fbf59bc9STejun Heo if (cpu == tcpu) 2552fbf59bc9STejun Heo break; 2553fbf59bc9STejun Heo if (group_map[tcpu] == group && cpu_distance_fn && 2554fbf59bc9STejun Heo (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 2555fbf59bc9STejun Heo cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 2556fbf59bc9STejun Heo group++; 2557fbf59bc9STejun Heo nr_groups = max(nr_groups, group + 1); 2558fbf59bc9STejun Heo goto next_group; 2559fbf59bc9STejun Heo } 2560fbf59bc9STejun Heo } 2561fbf59bc9STejun Heo group_map[cpu] = group; 2562fbf59bc9STejun Heo group_cnt[group]++; 2563fbf59bc9STejun Heo } 2564fbf59bc9STejun Heo 2565fbf59bc9STejun Heo /* 25669c015162SDennis Zhou (Facebook) * Wasted space is caused by a ratio imbalance of upa to group_cnt. 25679c015162SDennis Zhou (Facebook) * Expand the unit_size until we use >= 75% of the units allocated. 
25689c015162SDennis Zhou (Facebook) * Related to atom_size, which could be much larger than the unit_size. 2569fbf59bc9STejun Heo */ 2570fbf59bc9STejun Heo last_allocs = INT_MAX; 2571fbf59bc9STejun Heo for (upa = max_upa; upa; upa--) { 2572fbf59bc9STejun Heo int allocs = 0, wasted = 0; 2573fbf59bc9STejun Heo 2574f09f1243SAlexander Kuleshov if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2575fbf59bc9STejun Heo continue; 2576fbf59bc9STejun Heo 2577fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2578fbf59bc9STejun Heo int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2579fbf59bc9STejun Heo allocs += this_allocs; 2580fbf59bc9STejun Heo wasted += this_allocs * upa - group_cnt[group]; 2581fbf59bc9STejun Heo } 2582fbf59bc9STejun Heo 2583fbf59bc9STejun Heo /* 2584fbf59bc9STejun Heo * Don't accept if wastage is over 1/3. The 2585fbf59bc9STejun Heo * greater-than comparison ensures upa==1 always 2586fbf59bc9STejun Heo * passes the following check. 2587fbf59bc9STejun Heo */ 2588fbf59bc9STejun Heo if (wasted > num_possible_cpus() / 3) 2589fbf59bc9STejun Heo continue; 2590fbf59bc9STejun Heo 2591fbf59bc9STejun Heo /* and then don't consume more memory */ 2592fbf59bc9STejun Heo if (allocs > last_allocs) 2593fbf59bc9STejun Heo break; 2594fbf59bc9STejun Heo last_allocs = allocs; 2595fbf59bc9STejun Heo best_upa = upa; 2596fbf59bc9STejun Heo } 2597fbf59bc9STejun Heo upa = best_upa; 2598fbf59bc9STejun Heo 2599fbf59bc9STejun Heo /* allocate and fill alloc_info */ 2600fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) 2601fbf59bc9STejun Heo nr_units += roundup(group_cnt[group], upa); 2602fbf59bc9STejun Heo 2603fbf59bc9STejun Heo ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2604fbf59bc9STejun Heo if (!ai) 2605fbf59bc9STejun Heo return ERR_PTR(-ENOMEM); 2606fbf59bc9STejun Heo cpu_map = ai->groups[0].cpu_map; 2607fbf59bc9STejun Heo 2608fbf59bc9STejun Heo for (group = 0; group < nr_groups; group++) { 2609fbf59bc9STejun Heo ai->groups[group].cpu_map = cpu_map; 2610fbf59bc9STejun Heo cpu_map += roundup(group_cnt[group], upa); 2611fbf59bc9STejun Heo } 2612fbf59bc9STejun Heo 2613fbf59bc9STejun Heo ai->static_size = static_size; 2614fbf59bc9STejun Heo ai->reserved_size = reserved_size; 2615fbf59bc9STejun Heo ai->dyn_size = dyn_size; 2616fbf59bc9STejun Heo ai->unit_size = alloc_size / upa; 2617fbf59bc9STejun Heo ai->atom_size = atom_size; 2618fbf59bc9STejun Heo ai->alloc_size = alloc_size; 2619fbf59bc9STejun Heo 26202de7852fSPeng Fan for (group = 0, unit = 0; group < nr_groups; group++) { 2621fbf59bc9STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2622fbf59bc9STejun Heo 2623fbf59bc9STejun Heo /* 2624fbf59bc9STejun Heo * Initialize base_offset as if all groups are located 2625fbf59bc9STejun Heo * back-to-back. The caller should update this to 2626fbf59bc9STejun Heo * reflect actual allocation. 
2627fbf59bc9STejun Heo */ 2628fbf59bc9STejun Heo gi->base_offset = unit * ai->unit_size; 2629fbf59bc9STejun Heo 2630fbf59bc9STejun Heo for_each_possible_cpu(cpu) 2631fbf59bc9STejun Heo if (group_map[cpu] == group) 2632fbf59bc9STejun Heo gi->cpu_map[gi->nr_units++] = cpu; 2633fbf59bc9STejun Heo gi->nr_units = roundup(gi->nr_units, upa); 2634fbf59bc9STejun Heo unit += gi->nr_units; 2635fbf59bc9STejun Heo } 2636fbf59bc9STejun Heo BUG_ON(unit != nr_units); 2637fbf59bc9STejun Heo 2638fbf59bc9STejun Heo return ai; 2639fbf59bc9STejun Heo } 26403c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2641fbf59bc9STejun Heo 26423c9a024fSTejun Heo #if defined(BUILD_EMBED_FIRST_CHUNK) 264366c3a757STejun Heo /** 264466c3a757STejun Heo * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 264566c3a757STejun Heo * @reserved_size: the size of reserved percpu area in bytes 26464ba6ce25STejun Heo * @dyn_size: minimum free size for dynamic allocation in bytes 2647c8826dd5STejun Heo * @atom_size: allocation atom size 2648c8826dd5STejun Heo * @cpu_distance_fn: callback to determine distance between cpus, optional 2649c8826dd5STejun Heo * @alloc_fn: function to allocate percpu page 265025985edcSLucas De Marchi * @free_fn: function to free percpu page 265166c3a757STejun Heo * 265266c3a757STejun Heo * This is a helper to ease setting up embedded first percpu chunk and 265366c3a757STejun Heo * can be called where pcpu_setup_first_chunk() is expected. 265466c3a757STejun Heo * 265566c3a757STejun Heo * If this function is used to setup the first chunk, it is allocated 2656c8826dd5STejun Heo * by calling @alloc_fn and used as-is without being mapped into 2657c8826dd5STejun Heo * vmalloc area. Allocations are always whole multiples of @atom_size 2658c8826dd5STejun Heo * aligned to @atom_size. 2659c8826dd5STejun Heo * 2660c8826dd5STejun Heo * This enables the first chunk to piggy back on the linear physical 2661c8826dd5STejun Heo * mapping which often uses larger page size. Please note that this 2662c8826dd5STejun Heo * can result in very sparse cpu->unit mapping on NUMA machines thus 2663c8826dd5STejun Heo * requiring large vmalloc address space. Don't use this allocator if 2664c8826dd5STejun Heo * vmalloc space is not orders of magnitude larger than distances 2665c8826dd5STejun Heo * between node memory addresses (ie. 32bit NUMA machines). 266666c3a757STejun Heo * 26674ba6ce25STejun Heo * @dyn_size specifies the minimum dynamic area size. 266866c3a757STejun Heo * 266966c3a757STejun Heo * If the needed size is smaller than the minimum or specified unit 2670c8826dd5STejun Heo * size, the leftover is returned using @free_fn. 267166c3a757STejun Heo * 267266c3a757STejun Heo * RETURNS: 2673fb435d52STejun Heo * 0 on success, -errno on failure. 
267466c3a757STejun Heo */ 26754ba6ce25STejun Heo int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 2676c8826dd5STejun Heo size_t atom_size, 2677c8826dd5STejun Heo pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 2678c8826dd5STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2679c8826dd5STejun Heo pcpu_fc_free_fn_t free_fn) 268066c3a757STejun Heo { 2681c8826dd5STejun Heo void *base = (void *)ULONG_MAX; 2682c8826dd5STejun Heo void **areas = NULL; 2683fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 268493c76b6bSzijun_hu size_t size_sum, areas_size; 268593c76b6bSzijun_hu unsigned long max_distance; 2686163fa234SKefeng Wang int group, i, highest_group, rc = 0; 268766c3a757STejun Heo 2688c8826dd5STejun Heo ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 2689c8826dd5STejun Heo cpu_distance_fn); 2690fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2691fd1e8a1fSTejun Heo return PTR_ERR(ai); 269266c3a757STejun Heo 2693fd1e8a1fSTejun Heo size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2694c8826dd5STejun Heo areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 269566c3a757STejun Heo 269626fb3daeSMike Rapoport areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 2697c8826dd5STejun Heo if (!areas) { 2698fb435d52STejun Heo rc = -ENOMEM; 2699c8826dd5STejun Heo goto out_free; 2700fa8a7094STejun Heo } 270166c3a757STejun Heo 27029b739662Szijun_hu /* allocate, copy and determine base address & max_distance */ 27039b739662Szijun_hu highest_group = 0; 2704c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2705c8826dd5STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 2706c8826dd5STejun Heo unsigned int cpu = NR_CPUS; 2707c8826dd5STejun Heo void *ptr; 270866c3a757STejun Heo 2709c8826dd5STejun Heo for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2710c8826dd5STejun Heo cpu = gi->cpu_map[i]; 2711c8826dd5STejun Heo BUG_ON(cpu == NR_CPUS); 2712c8826dd5STejun Heo 2713c8826dd5STejun Heo /* allocate space for the whole group */ 2714c8826dd5STejun Heo ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2715c8826dd5STejun Heo if (!ptr) { 2716c8826dd5STejun Heo rc = -ENOMEM; 2717c8826dd5STejun Heo goto out_free_areas; 2718c8826dd5STejun Heo } 2719f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2720f528f0b8SCatalin Marinas kmemleak_free(ptr); 2721c8826dd5STejun Heo areas[group] = ptr; 2722c8826dd5STejun Heo 2723c8826dd5STejun Heo base = min(ptr, base); 27249b739662Szijun_hu if (ptr > areas[highest_group]) 27259b739662Szijun_hu highest_group = group; 27269b739662Szijun_hu } 27279b739662Szijun_hu max_distance = areas[highest_group] - base; 27289b739662Szijun_hu max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 27299b739662Szijun_hu 27309b739662Szijun_hu /* warn if maximum distance is further than 75% of vmalloc space */ 27319b739662Szijun_hu if (max_distance > VMALLOC_TOTAL * 3 / 4) { 27329b739662Szijun_hu pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 27339b739662Szijun_hu max_distance, VMALLOC_TOTAL); 27349b739662Szijun_hu #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 27359b739662Szijun_hu /* and fail if we have fallback */ 27369b739662Szijun_hu rc = -EINVAL; 27379b739662Szijun_hu goto out_free_areas; 27389b739662Szijun_hu #endif 273942b64281STejun Heo } 274042b64281STejun Heo 274142b64281STejun Heo /* 274242b64281STejun Heo * Copy data and free unused parts. 
This should happen after all 274342b64281STejun Heo * allocations are complete; otherwise, we may end up with 274442b64281STejun Heo * overlapping groups. 274542b64281STejun Heo */ 274642b64281STejun Heo for (group = 0; group < ai->nr_groups; group++) { 274742b64281STejun Heo struct pcpu_group_info *gi = &ai->groups[group]; 274842b64281STejun Heo void *ptr = areas[group]; 2749c8826dd5STejun Heo 2750c8826dd5STejun Heo for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2751c8826dd5STejun Heo if (gi->cpu_map[i] == NR_CPUS) { 2752c8826dd5STejun Heo /* unused unit, free whole */ 2753c8826dd5STejun Heo free_fn(ptr, ai->unit_size); 2754c8826dd5STejun Heo continue; 2755c8826dd5STejun Heo } 2756c8826dd5STejun Heo /* copy and return the unused part */ 2757fd1e8a1fSTejun Heo memcpy(ptr, __per_cpu_load, ai->static_size); 2758c8826dd5STejun Heo free_fn(ptr + size_sum, ai->unit_size - size_sum); 2759c8826dd5STejun Heo } 276066c3a757STejun Heo } 276166c3a757STejun Heo 2762c8826dd5STejun Heo /* base address is now known, determine group base offsets */ 27636ea529a2STejun Heo for (group = 0; group < ai->nr_groups; group++) { 2764c8826dd5STejun Heo ai->groups[group].base_offset = areas[group] - base; 27656ea529a2STejun Heo } 2766c8826dd5STejun Heo 276700206a69SMatteo Croce pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", 276800206a69SMatteo Croce PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, 2769fd1e8a1fSTejun Heo ai->dyn_size, ai->unit_size); 277066c3a757STejun Heo 2771163fa234SKefeng Wang pcpu_setup_first_chunk(ai, base); 2772c8826dd5STejun Heo goto out_free; 2773c8826dd5STejun Heo 2774c8826dd5STejun Heo out_free_areas: 2775c8826dd5STejun Heo for (group = 0; group < ai->nr_groups; group++) 2776f851c8d8SMichael Holzheu if (areas[group]) 2777c8826dd5STejun Heo free_fn(areas[group], 2778c8826dd5STejun Heo ai->groups[group].nr_units * ai->unit_size); 2779c8826dd5STejun Heo out_free: 2780fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2781c8826dd5STejun Heo if (areas) 2782999c17e3SSantosh Shilimkar memblock_free_early(__pa(areas), areas_size); 2783fb435d52STejun Heo return rc; 2784d4b95f80STejun Heo } 27853c9a024fSTejun Heo #endif /* BUILD_EMBED_FIRST_CHUNK */ 2786d4b95f80STejun Heo 27873c9a024fSTejun Heo #ifdef BUILD_PAGE_FIRST_CHUNK 2788d4b95f80STejun Heo /** 278900ae4064STejun Heo * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2790d4b95f80STejun Heo * @reserved_size: the size of reserved percpu area in bytes 2791d4b95f80STejun Heo * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 279225985edcSLucas De Marchi * @free_fn: function to free percpu page, always called with PAGE_SIZE 2793d4b95f80STejun Heo * @populate_pte_fn: function to populate pte 2794d4b95f80STejun Heo * 279500ae4064STejun Heo * This is a helper to ease setting up page-remapped first percpu 279600ae4064STejun Heo * chunk and can be called where pcpu_setup_first_chunk() is expected. 2797d4b95f80STejun Heo * 2798d4b95f80STejun Heo * This is the basic allocator. Static percpu area is allocated 2799d4b95f80STejun Heo * page-by-page into vmalloc area. 2800d4b95f80STejun Heo * 2801d4b95f80STejun Heo * RETURNS: 2802fb435d52STejun Heo * 0 on success, -errno on failure. 
2803d4b95f80STejun Heo */ 2804fb435d52STejun Heo int __init pcpu_page_first_chunk(size_t reserved_size, 2805d4b95f80STejun Heo pcpu_fc_alloc_fn_t alloc_fn, 2806d4b95f80STejun Heo pcpu_fc_free_fn_t free_fn, 2807d4b95f80STejun Heo pcpu_fc_populate_pte_fn_t populate_pte_fn) 2808d4b95f80STejun Heo { 28098f05a6a6STejun Heo static struct vm_struct vm; 2810fd1e8a1fSTejun Heo struct pcpu_alloc_info *ai; 281100ae4064STejun Heo char psize_str[16]; 2812ce3141a2STejun Heo int unit_pages; 2813d4b95f80STejun Heo size_t pages_size; 2814ce3141a2STejun Heo struct page **pages; 2815163fa234SKefeng Wang int unit, i, j, rc = 0; 28168f606604Szijun_hu int upa; 28178f606604Szijun_hu int nr_g0_units; 2818d4b95f80STejun Heo 281900ae4064STejun Heo snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 282000ae4064STejun Heo 28214ba6ce25STejun Heo ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2822fd1e8a1fSTejun Heo if (IS_ERR(ai)) 2823fd1e8a1fSTejun Heo return PTR_ERR(ai); 2824fd1e8a1fSTejun Heo BUG_ON(ai->nr_groups != 1); 28258f606604Szijun_hu upa = ai->alloc_size/ai->unit_size; 28268f606604Szijun_hu nr_g0_units = roundup(num_possible_cpus(), upa); 28270b59c25fSIgor Stoppa if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { 28288f606604Szijun_hu pcpu_free_alloc_info(ai); 28298f606604Szijun_hu return -EINVAL; 28308f606604Szijun_hu } 2831fd1e8a1fSTejun Heo 2832fd1e8a1fSTejun Heo unit_pages = ai->unit_size >> PAGE_SHIFT; 2833d4b95f80STejun Heo 2834d4b95f80STejun Heo /* unaligned allocations can't be freed, round up to page size */ 2835fd1e8a1fSTejun Heo pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2836fd1e8a1fSTejun Heo sizeof(pages[0])); 28377e1c4e27SMike Rapoport pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); 2838f655f405SMike Rapoport if (!pages) 2839f655f405SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__, 2840f655f405SMike Rapoport pages_size); 2841d4b95f80STejun Heo 28428f05a6a6STejun Heo /* allocate pages */ 2843d4b95f80STejun Heo j = 0; 28448f606604Szijun_hu for (unit = 0; unit < num_possible_cpus(); unit++) { 2845fd1e8a1fSTejun Heo unsigned int cpu = ai->groups[0].cpu_map[unit]; 28468f606604Szijun_hu for (i = 0; i < unit_pages; i++) { 2847d4b95f80STejun Heo void *ptr; 2848d4b95f80STejun Heo 28493cbc8565STejun Heo ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2850d4b95f80STejun Heo if (!ptr) { 2851870d4b12SJoe Perches pr_warn("failed to allocate %s page for cpu%u\n", 2852598d8091SJoe Perches psize_str, cpu); 2853d4b95f80STejun Heo goto enomem; 2854d4b95f80STejun Heo } 2855f528f0b8SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2856f528f0b8SCatalin Marinas kmemleak_free(ptr); 2857ce3141a2STejun Heo pages[j++] = virt_to_page(ptr); 2858d4b95f80STejun Heo } 28598f606604Szijun_hu } 2860d4b95f80STejun Heo 28618f05a6a6STejun Heo /* allocate vm area, map the pages and copy static data */ 28628f05a6a6STejun Heo vm.flags = VM_ALLOC; 2863fd1e8a1fSTejun Heo vm.size = num_possible_cpus() * ai->unit_size; 28648f05a6a6STejun Heo vm_area_register_early(&vm, PAGE_SIZE); 28658f05a6a6STejun Heo 2866fd1e8a1fSTejun Heo for (unit = 0; unit < num_possible_cpus(); unit++) { 28671d9d3257STejun Heo unsigned long unit_addr = 2868fd1e8a1fSTejun Heo (unsigned long)vm.addr + unit * ai->unit_size; 28698f05a6a6STejun Heo 2870ce3141a2STejun Heo for (i = 0; i < unit_pages; i++) 28718f05a6a6STejun Heo populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 28728f05a6a6STejun Heo 28738f05a6a6STejun Heo /* pte already populated, the following shouldn't fail */ 
2874fb435d52STejun Heo rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2875ce3141a2STejun Heo unit_pages); 2876fb435d52STejun Heo if (rc < 0) 2877fb435d52STejun Heo panic("failed to map percpu area, err=%d\n", rc); 28788f05a6a6STejun Heo 28798f05a6a6STejun Heo /* 28808f05a6a6STejun Heo * FIXME: Archs with virtual cache should flush local 28818f05a6a6STejun Heo * cache for the linear mapping here - something 28828f05a6a6STejun Heo * equivalent to flush_cache_vmap() on the local cpu. 28838f05a6a6STejun Heo * flush_cache_vmap() can't be used as most supporting 28848f05a6a6STejun Heo * data structures are not set up yet. 28858f05a6a6STejun Heo */ 28868f05a6a6STejun Heo 28878f05a6a6STejun Heo /* copy static data */ 2888fd1e8a1fSTejun Heo memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 288966c3a757STejun Heo } 289066c3a757STejun Heo 289166c3a757STejun Heo /* we're ready, commit */ 289200206a69SMatteo Croce pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", 289300206a69SMatteo Croce unit_pages, psize_str, ai->static_size, 2894fd1e8a1fSTejun Heo ai->reserved_size, ai->dyn_size); 289566c3a757STejun Heo 2896163fa234SKefeng Wang pcpu_setup_first_chunk(ai, vm.addr); 2897d4b95f80STejun Heo goto out_free_ar; 2898d4b95f80STejun Heo 2899d4b95f80STejun Heo enomem: 2900d4b95f80STejun Heo while (--j >= 0) 2901ce3141a2STejun Heo free_fn(page_address(pages[j]), PAGE_SIZE); 2902fb435d52STejun Heo rc = -ENOMEM; 2903d4b95f80STejun Heo out_free_ar: 2904999c17e3SSantosh Shilimkar memblock_free_early(__pa(pages), pages_size); 2905fd1e8a1fSTejun Heo pcpu_free_alloc_info(ai); 2906fb435d52STejun Heo return rc; 290766c3a757STejun Heo } 29083c9a024fSTejun Heo #endif /* BUILD_PAGE_FIRST_CHUNK */ 2909d4b95f80STejun Heo 2910bbddff05STejun Heo #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 29118c4bfc6eSTejun Heo /* 2912bbddff05STejun Heo * Generic SMP percpu area setup. 2913e74e3962STejun Heo * 2914e74e3962STejun Heo * The embedding helper is used because its behavior closely resembles 2915e74e3962STejun Heo * the original non-dynamic generic percpu area setup. This is 2916e74e3962STejun Heo * important because many archs have addressing restrictions and might 2917e74e3962STejun Heo * fail if the percpu area is located far away from the previous 2918e74e3962STejun Heo * location. As an added bonus, in non-NUMA cases, embedding is 2919e74e3962STejun Heo * generally a good idea TLB-wise because percpu area can piggy back 2920e74e3962STejun Heo * on the physical linear memory mapping which uses large page 2921e74e3962STejun Heo * mappings on applicable archs. 
2922e74e3962STejun Heo */ 2923e74e3962STejun Heo unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2924e74e3962STejun Heo EXPORT_SYMBOL(__per_cpu_offset); 2925e74e3962STejun Heo 2926c8826dd5STejun Heo static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2927c8826dd5STejun Heo size_t align) 2928c8826dd5STejun Heo { 292926fb3daeSMike Rapoport return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); 2930c8826dd5STejun Heo } 2931c8826dd5STejun Heo 2932c8826dd5STejun Heo static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2933c8826dd5STejun Heo { 2934999c17e3SSantosh Shilimkar memblock_free_early(__pa(ptr), size); 2935c8826dd5STejun Heo } 2936c8826dd5STejun Heo 2937e74e3962STejun Heo void __init setup_per_cpu_areas(void) 2938e74e3962STejun Heo { 2939e74e3962STejun Heo unsigned long delta; 2940e74e3962STejun Heo unsigned int cpu; 2941fb435d52STejun Heo int rc; 2942e74e3962STejun Heo 2943e74e3962STejun Heo /* 2944e74e3962STejun Heo * Always reserve area for module percpu variables. That's 2945e74e3962STejun Heo * what the legacy allocator did. 2946e74e3962STejun Heo */ 2947fb435d52STejun Heo rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 2948c8826dd5STejun Heo PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 2949c8826dd5STejun Heo pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 2950fb435d52STejun Heo if (rc < 0) 2951bbddff05STejun Heo panic("Failed to initialize percpu areas."); 2952e74e3962STejun Heo 2953e74e3962STejun Heo delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2954e74e3962STejun Heo for_each_possible_cpu(cpu) 2955fb435d52STejun Heo __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 2956e74e3962STejun Heo } 2957e74e3962STejun Heo #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2958099a19d9STejun Heo 2959bbddff05STejun Heo #else /* CONFIG_SMP */ 2960bbddff05STejun Heo 2961bbddff05STejun Heo /* 2962bbddff05STejun Heo * UP percpu area setup. 2963bbddff05STejun Heo * 2964bbddff05STejun Heo * UP always uses km-based percpu allocator with identity mapping. 2965bbddff05STejun Heo * Static percpu variables are indistinguishable from the usual static 2966bbddff05STejun Heo * variables and don't require any special preparation. 
2967bbddff05STejun Heo */ 2968bbddff05STejun Heo void __init setup_per_cpu_areas(void) 2969bbddff05STejun Heo { 2970bbddff05STejun Heo const size_t unit_size = 2971bbddff05STejun Heo roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 2972bbddff05STejun Heo PERCPU_DYNAMIC_RESERVE)); 2973bbddff05STejun Heo struct pcpu_alloc_info *ai; 2974bbddff05STejun Heo void *fc; 2975bbddff05STejun Heo 2976bbddff05STejun Heo ai = pcpu_alloc_alloc_info(1, 1); 297726fb3daeSMike Rapoport fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 2978bbddff05STejun Heo if (!ai || !fc) 2979bbddff05STejun Heo panic("Failed to allocate memory for percpu areas."); 2980100d13c3SCatalin Marinas /* kmemleak tracks the percpu allocations separately */ 2981100d13c3SCatalin Marinas kmemleak_free(fc); 2982bbddff05STejun Heo 2983bbddff05STejun Heo ai->dyn_size = unit_size; 2984bbddff05STejun Heo ai->unit_size = unit_size; 2985bbddff05STejun Heo ai->atom_size = unit_size; 2986bbddff05STejun Heo ai->alloc_size = unit_size; 2987bbddff05STejun Heo ai->groups[0].nr_units = 1; 2988bbddff05STejun Heo ai->groups[0].cpu_map[0] = 0; 2989bbddff05STejun Heo 2990163fa234SKefeng Wang pcpu_setup_first_chunk(ai, fc); 2991438a5061SNicolas Pitre pcpu_free_alloc_info(ai); 2992bbddff05STejun Heo } 2993bbddff05STejun Heo 2994bbddff05STejun Heo #endif /* CONFIG_SMP */ 2995bbddff05STejun Heo 2996099a19d9STejun Heo /* 29977e8a6304SDennis Zhou (Facebook) * pcpu_nr_pages - calculate total number of populated backing pages 29987e8a6304SDennis Zhou (Facebook) * 29997e8a6304SDennis Zhou (Facebook) * This reflects the number of pages populated to back chunks. Metadata is 30007e8a6304SDennis Zhou (Facebook) * excluded in the number exposed in meminfo as the number of backing pages 30017e8a6304SDennis Zhou (Facebook) * scales with the number of cpus and can quickly outweigh the memory used for 30027e8a6304SDennis Zhou (Facebook) * metadata. It also keeps this calculation nice and simple. 30037e8a6304SDennis Zhou (Facebook) * 30047e8a6304SDennis Zhou (Facebook) * RETURNS: 30057e8a6304SDennis Zhou (Facebook) * Total number of populated backing pages in use by the allocator. 30067e8a6304SDennis Zhou (Facebook) */ 30077e8a6304SDennis Zhou (Facebook) unsigned long pcpu_nr_pages(void) 30087e8a6304SDennis Zhou (Facebook) { 30097e8a6304SDennis Zhou (Facebook) return pcpu_nr_populated * pcpu_nr_units; 30107e8a6304SDennis Zhou (Facebook) } 30117e8a6304SDennis Zhou (Facebook) 30127e8a6304SDennis Zhou (Facebook) /* 30131a4d7607STejun Heo * Percpu allocator is initialized early during boot when neither slab or 30141a4d7607STejun Heo * workqueue is available. Plug async management until everything is up 30151a4d7607STejun Heo * and running. 30161a4d7607STejun Heo */ 30171a4d7607STejun Heo static int __init percpu_enable_async(void) 30181a4d7607STejun Heo { 30191a4d7607STejun Heo pcpu_async_enabled = true; 30201a4d7607STejun Heo return 0; 30211a4d7607STejun Heo } 30221a4d7607STejun Heo subsys_initcall(percpu_enable_async); 3023
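/*
 * Illustrative usage sketch (editorial addition, not part of the
 * allocator): a minimal example of how the dynamic percpu interfaces
 * implemented in this file are typically consumed.  The names
 * foo_stats, foo_stats_init(), foo_stats_total_hits() and
 * foo_stats_exit() are hypothetical and exist only for illustration;
 * alloc_percpu(), per_cpu_ptr(), for_each_possible_cpu() and
 * free_percpu() are the real interfaces being demonstrated.
 *
 *	#include <linux/percpu.h>
 *
 *	struct foo_stats {		// hypothetical per-cpu payload
 *		u64 hits;
 *		u64 misses;
 *	};
 *
 *	// hypothetical __percpu cookie returned by alloc_percpu()
 *	static struct foo_stats __percpu *foo_stats;
 *
 *	static int foo_stats_init(void)
 *	{
 *		// carve one copy of struct foo_stats per possible CPU
 *		// out of a percpu chunk
 *		foo_stats = alloc_percpu(struct foo_stats);
 *		return foo_stats ? 0 : -ENOMEM;
 *	}
 *
 *	static u64 foo_stats_total_hits(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		// per_cpu_ptr() turns the cookie into a dereferenceable
 *		// address for a given CPU's copy
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu_ptr(foo_stats, cpu)->hits;
 *		return sum;
 *	}
 *
 *	static void foo_stats_exit(void)
 *	{
 *		// hand the area back to its chunk
 *		free_percpu(foo_stats);
 *	}
 */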